hash | repo | date | license | message | mods
---|---|---|---|---|---|
6c368f596a1654a4866ab75db76759cd1f0aeb00 | appscale/gts | 22.07.2019 19:32:56 | Apache License 2.0 | Support the service_account_name parameter
This allows GetAccessTokenRequests to specify a custom service
account for generating an authentication token. | [
{
"change_type": "MODIFY",
"old_path": "APIServer/appscale/api_server/app_identity.py",
"new_path": "APIServer/appscale/api_server/app_identity.py",
"diff": "@@ -1,6 +1,10 @@\n \"\"\" Implements the App Identity API. \"\"\"\n \n+import json\n import logging\n+import time\n+import urllib\n+import urllib2\n \n from kazoo.exceptions import KazooException\n from kazoo.exceptions import NoNodeError\n@@ -12,8 +16,8 @@ from appscale.api_server import crypto\n from appscale.api_server.base_service import BaseService\n from appscale.api_server.constants import ApplicationError\n from appscale.api_server.constants import CallNotFound\n-from appscale.api_server.crypto import PrivateKey\n-from appscale.api_server.crypto import PublicCertificate\n+from appscale.api_server.crypto import (\n+ AccessToken, PrivateKey, PublicCertificate)\n from appscale.common.async_retrying import retry_children_watch_coroutine\n \n logger = logging.getLogger(__name__)\n@@ -56,20 +60,22 @@ class AppIdentityService(BaseService):\n super(AppIdentityService, self).__init__(self.SERVICE_NAME)\n \n self.project_id = project_id\n+ project_node = '/appscale/projects/{}'.format(self.project_id)\n \n self._zk_client = zk_client\n- self._key_node = '/appscale/projects/{}/private_key'.format(\n- self.project_id)\n+ self._key_node = '{}/private_key'.format(project_node)\n self._key = None\n self._ensure_private_key()\n self._zk_client.DataWatch(self._key_node, self._update_key)\n \n- self._certs_node = '/appscale/projects/{}/certificates'.format(\n- self.project_id)\n+ self._certs_node = '{}/certificates'.format(project_node)\n self._zk_client.ensure_path(self._certs_node)\n self._certs = []\n self._zk_client.ChildrenWatch(self._certs_node, self._update_certs)\n \n+ self._service_accounts_node = '{}/service_accounts'.format(\n+ project_node)\n+\n def get_public_certificates(self):\n \"\"\" Retrieves a list of valid public certificates for the project.\n \n@@ -130,8 +136,9 @@ class AppIdentityService(BaseService):\n Raises:\n UnknownError if the service account is not configured.\n \"\"\"\n- if self._key is None:\n- raise UnknownError('A private key is not configured')\n+ # TODO: Check if it makes sense to store the audience with the service\n+ # account definition.\n+ default_audience = 'https://www.googleapis.com/oauth2/v4/token'\n \n if service_account_id is not None:\n raise UnknownError(\n@@ -139,10 +146,45 @@ class AppIdentityService(BaseService):\n \n if (service_account_name is not None and\n service_account_name != self._key.key_name):\n- raise UnknownError(\n- '{} is not configured'.format(service_account_name))\n+ service_account_node = '/'.join([self._service_accounts_node,\n+ service_account_name])\n+ try:\n+ account_details = self._zk_client.get(service_account_node)[0]\n+ except NoNodeError:\n+ raise UnknownError(\n+ '{} is not configured'.format(service_account_name))\n+\n+ try:\n+ account_details = json.loads(account_details)\n+ except ValueError:\n+ raise UnknownError(\n+ '{} has invalid data'.format(service_account_node))\n+\n+ pem = account_details['private_key'].encode('utf-8')\n+ key = PrivateKey.from_pem(service_account_name, pem)\n+ assertion = key.generate_assertion(default_audience, scopes)\n+\n+ grant_type = 'urn:ietf:params:oauth:grant-type:jwt-bearer'\n+ payload = urllib.urlencode({'grant_type': grant_type,\n+ 'assertion': assertion})\n+ try:\n+ response = urllib2.urlopen(default_audience, payload)\n+ except urllib2.HTTPError as error:\n+ raise UnknownError(error.msg)\n+ except urllib2.URLError as error:\n+ raise UnknownError(error.reason)\n+\n+ token_details = json.loads(response.read())\n+ logging.info('Generated access token: 
{}'.format(token_details))\n+ expiration_time = int(time.time()) + token_details['expires_in']\n+ return AccessToken(token_details['access_token'], expiration_time)\n+\n+ if self._key is None:\n+ raise UnknownError('A private key is not configured')\n \n- return self._key.generate_access_token(self.project_id, scopes)\n+ assertion = self._key.generate_assertion(default_audience, scopes)\n+ # TODO: Generate access token from assertion.\n+ return AccessToken(assertion, int(time.time() + 3600))\n \n def sign(self, blob):\n \"\"\" Signs a message with the project's key.\n@@ -218,7 +260,7 @@ class AppIdentityService(BaseService):\n logger.exception('Unable to get access token')\n raise ApplicationError(service_pb.UNKNOWN_ERROR, str(error))\n \n- response.access_token = token.token\n+ response.access_token = token.token.encode('utf-8')\n response.expiration_time = token.expiration_time\n elif method == 'GetDefaultGcsBucketName':\n response.default_gcs_bucket_name = self.DEFAULT_GCS_BUCKET_NAME\n"
},
{
"change_type": "MODIFY",
"old_path": "APIServer/appscale/api_server/crypto.py",
"new_path": "APIServer/appscale/api_server/crypto.py",
"diff": "@@ -52,9 +52,8 @@ class PrivateKey(object):\n ENCODING = serialization.Encoding.PEM\n ENCRYPTION = serialization.NoEncryption()\n FORMAT = serialization.PrivateFormat.PKCS8\n- PADDING = padding.PSS(mgf=padding.MGF1(hashes.SHA256()),\n- salt_length=padding.PSS.MAX_LENGTH)\n- TOKEN_LIFETIME = 1800\n+ PADDING = padding.PKCS1v15()\n+ TOKEN_LIFETIME = 3600\n \n def __init__(self, key_name, key):\n \"\"\" Creates a new PrivateKey.\n@@ -76,7 +75,7 @@ class PrivateKey(object):\n return self.key.private_bytes(self.ENCODING, self.FORMAT,\n self.ENCRYPTION)\n \n- def generate_access_token(self, project_id, scopes):\n+ def generate_assertion(self, audience, scopes):\n \"\"\" Creates an access token signed by the key.\n \n Args:\n@@ -86,19 +85,21 @@ class PrivateKey(object):\n An AccessToken.\n \"\"\"\n def encode_part(part):\n- serialized = json.dumps(part, separators=(',', ':'))\n- return base64.urlsafe_b64encode(serialized).rstrip('=')\n+ if isinstance(part, dict):\n+ part = json.dumps(part, separators=(',', ':')).encode('utf-8')\n+\n+ return base64.urlsafe_b64encode(part).rstrip('=')\n \n header = encode_part({'typ': 'JWT', 'alg': 'RS256'})\n- expiration_time = int(time.time() + self.TOKEN_LIFETIME)\n- metadata = encode_part({'project_id': project_id,\n- 'exp': expiration_time,\n- 'scopes': scopes})\n+ current_time = int(time.time())\n+ metadata = encode_part({'iss': self.key_name,\n+ 'aud': audience,\n+ 'scope': ' '.join(scopes),\n+ 'iat': current_time,\n+ 'exp': current_time + self.TOKEN_LIFETIME})\n \n signature = self.sign('.'.join([header, metadata]))\n- encoded_sig = base64.urlsafe_b64encode(signature).rstrip('=')\n- token = '.'.join([header, metadata, encoded_sig])\n- return AccessToken(token, expiration_time)\n+ return '.'.join([header, metadata, encode_part(signature)])\n \n def sign(self, blob):\n \"\"\" Signs a given payload.\n@@ -151,6 +152,10 @@ class PrivateKey(object):\n except KeyError as error:\n raise InvalidKey(str(error))\n \n+ return cls.from_pem(key_name, pem)\n+\n+ @classmethod\n+ def from_pem(cls, key_name, pem):\n try:\n key = serialization.load_pem_private_key(\n pem, password=None, backend=default_backend())\n"
}
] |
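The diff above implements the standard OAuth 2.0 JWT-bearer exchange: the API server signs an RS256 assertion with the stored service-account key and posts it to Google's token endpoint. The sketch below is a stand-alone illustration of that flow, not AppScale's exact code (which runs on Python 2 with `urllib2`); the `fetch_access_token` name is hypothetical, the endpoint and claim layout are taken from the diff, `pem_key` is assumed to be PEM bytes, and a reasonably recent `cryptography` release is assumed.

```python
import base64
import json
import time
import urllib.parse
import urllib.request

from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import padding

TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'


def _encode_part(part):
    """Base64url-encode a dict (as compact JSON) or raw bytes, without padding."""
    if isinstance(part, dict):
        part = json.dumps(part, separators=(',', ':')).encode('utf-8')
    return base64.urlsafe_b64encode(part).rstrip(b'=')


def fetch_access_token(service_account_email, pem_key, scopes):
    """Exchange a signed JWT assertion for an access token (JWT-bearer grant)."""
    key = serialization.load_pem_private_key(pem_key, password=None)
    now = int(time.time())
    header = _encode_part({'typ': 'JWT', 'alg': 'RS256'})
    claims = _encode_part({'iss': service_account_email,
                           'scope': ' '.join(scopes),
                           'aud': TOKEN_URI,
                           'iat': now,
                           'exp': now + 3600})
    unsigned = header + b'.' + claims
    signature = key.sign(unsigned, padding.PKCS1v15(), hashes.SHA256())
    assertion = (unsigned + b'.' + _encode_part(signature)).decode('ascii')

    payload = urllib.parse.urlencode({
        'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
        'assertion': assertion,
    }).encode('utf-8')
    with urllib.request.urlopen(TOKEN_URI, payload) as response:
        details = json.loads(response.read())
    # The API server wraps the equivalent result in AccessToken(token, expiration_time).
    return details['access_token'], now + details['expires_in']
```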
9cc896202dc38d962c01aa2637dbc5bbc3e3dd9b | appscale/gts | 16.08.2019 11:27:44 | Apache License 2.0 | Allow custom service accounts for restore ops
This extends the datastore admin interface to allow custom
service accounts for restore operations. | [
{
"change_type": "MODIFY",
"old_path": "AppServer/google/appengine/ext/datastore_admin/backup_handler.py",
"new_path": "AppServer/google/appengine/ext/datastore_admin/backup_handler.py",
"diff": "@@ -45,7 +45,6 @@ import copy\n import cStringIO\n import datetime\n import itertools\n-import json\n import logging\n import os\n import random\n@@ -130,24 +129,6 @@ BLOBSTORE_BACKUP_DISABLED_ERROR_MSG = (\n BACKUP_RESTORE_HANDLER = __name__ + '.RestoreEntity.map'\n \n \n-def get_service_account_names():\n- \"\"\" AppScale: Fetch list of service accounts from IAM API. \"\"\"\n- project_id = app_identity.get_application_id()\n- iam_location = 'https://127.0.0.1:17441'\n- url = iam_location + '/v1/projects/{}/serviceAccounts'.format(project_id)\n- token = app_identity.get_access_token(\n- ['https://www.googleapis.com/auth/cloud-platform'])[0]\n- headers = {'Authorization': 'Bearer {}'.format(token)}\n- response = urlfetch.fetch(url, headers=headers, validate_certificate=False)\n- try:\n- accounts = json.loads(response.content)['accounts']\n- except (KeyError, ValueError):\n- raise ValueError('Invalid list of service accounts: '\n- '{}'.format(response.content))\n-\n- return tuple(account['email'] for account in accounts)\n-\n-\n def _get_gcs_path_prefix_from_params_dict(params):\n \"\"\"Returs the gcs_path_prefix from request or mapreduce dict.\n \n@@ -313,7 +294,7 @@ class ConfirmBackupHandler(webapp.RequestHandler):\n 'notreadonly_warning': notreadonly_warning,\n 'blob_warning': blob_warning,\n 'backup_name': 'datastore_backup_%s' % time.strftime('%Y_%m_%d'),\n- 'service_accounts': get_service_account_names()\n+ 'service_accounts': utils.get_service_account_names()\n }\n utils.RenderToResponse(handler, 'confirm_backup.html', template_params)\n \n@@ -425,6 +406,7 @@ class ConfirmRestoreFromBackupHandler(webapp.RequestHandler):\n 'notreadonly_warning': notreadonly_warning,\n 'original_app_warning': original_app_warning,\n 'run_as_a_service': handler.request.get('run_as_a_service'),\n+ 'service_accounts': utils.get_service_account_names(),\n }\n utils.RenderToResponse(handler, 'confirm_restore_from_backup.html',\n template_params)\n@@ -443,6 +425,12 @@ class ConfirmBackupImportHandler(webapp.RequestHandler):\n handler: the webapp.RequestHandler invoking the method\n \"\"\"\n gs_handle = handler.request.get('gs_handle')\n+\n+ # AppScale: Use custom service account if specified.\n+ account_id = handler.request.get('service_account_name', '')\n+ if not account_id:\n+ raise ValueError('Invalid service account name')\n+\n error = None if gs_handle else 'Google Cloud Storage path is missing'\n other_backup_info_files = []\n selected_backup_info_file = None\n@@ -452,7 +440,7 @@ class ConfirmBackupImportHandler(webapp.RequestHandler):\n gs_handle = gs_handle.rstrip()\n bucket_name, prefix = parse_gs_handle(gs_handle)\n validate_gcs_bucket_name(bucket_name)\n- if not is_accessible_bucket_name(bucket_name):\n+ if not is_accessible_bucket_name(bucket_name, account_id):\n raise BackupValidationError(\n 'Bucket \"%s\" is not accessible' % bucket_name)\n if prefix.endswith('.backup_info'):\n@@ -460,7 +448,8 @@ class ConfirmBackupImportHandler(webapp.RequestHandler):\n backup_info_specified = True\n elif prefix and not prefix.endswith('/'):\n prefix += '/'\n- for backup_info_file in list_bucket_files(bucket_name, prefix):\n+ for backup_info_file in list_bucket_files(bucket_name, prefix,\n+ account_id=account_id):\n backup_info_path = '/gs/%s/%s' % (bucket_name, backup_info_file)\n if backup_info_specified and backup_info_path == gs_handle:\n selected_backup_info_file = backup_info_path\n@@ -479,6 +468,7 @@ class ConfirmBackupImportHandler(webapp.RequestHandler):\n 'backup_info_specified': 
backup_info_specified,\n 'xsrf_token': utils.CreateXsrfToken(XSRF_ACTION),\n 'run_as_a_service': handler.request.get('run_as_a_service'),\n+ 'service_account_name': account_id,\n }\n utils.RenderToResponse(handler, 'confirm_backup_import.html',\n template_params)\n@@ -1065,7 +1055,8 @@ def _restore(backup_id, kinds, run_as_a_service, queue, mapper_params):\n if backup.filesystem == FILES_API_GS_FILESYSTEM:\n input_reader_to_use = (input_readers.__name__ +\n '.GoogleCloudStorageRecordInputReader')\n- if not is_readable_gs_handle(backup.gs_handle):\n+ if not is_readable_gs_handle(backup.gs_handle,\n+ mapper_params.get('account_id')):\n raise ValueError('Backup not readable.')\n \n if not mapper_params['files']:\n@@ -1126,23 +1117,28 @@ class DoBackupRestoreHandler(BaseDoHandler):\n def _ProcessPostRequest(self):\n \"\"\"Triggers backup restore mapper jobs and returns their ids.\"\"\"\n try:\n+ # AppScale: Use custom service account if specified.\n+ mapper_params = _get_basic_mapper_params(self)\n+ account_id = self.request.get('service_account_name', None)\n+ mapper_params['account_id'] = account_id\n+ mapper_params['tmp_account_id'] = account_id\n return [\n _restore(\n backup_id=self.request.get('backup_id'),\n kinds=self.request.get_all('kind'),\n run_as_a_service=self.request.get('run_as_a_service', False),\n queue=self.request.get('queue'),\n- mapper_params=_get_basic_mapper_params(self)\n+ mapper_params=mapper_params\n )\n ]\n except ValueError, e:\n return [('error', e.message)]\n \n \n-def _import_backup(gs_handle):\n+def _import_backup(gs_handle, account_id=None):\n \"\"\"Import backup from `gs_handle` to the current project.\"\"\"\n bucket_name, path = parse_gs_handle(gs_handle)\n- file_content = get_gs_object(bucket_name, path)\n+ file_content = get_gs_object(bucket_name, path, account_id)\n entities = parse_backup_info_file(file_content)\n original_backup_info = entities.next()\n entity = datastore.Entity(BackupInformation.kind())\n@@ -1170,8 +1166,9 @@ class BackupImportAndRestoreLinkHandler(BaseLinkHandler):\n \n def _ProcessPostRequest(self):\n \"\"\"Handler for post requests to datastore_admin/import_backup.create.\"\"\"\n+ account_id = self.request.get('service_account_name', None)\n _restore(\n- backup_id=_import_backup(self.request.get('gs_handle')),\n+ backup_id=_import_backup(self.request.get('gs_handle'), account_id),\n kinds=self.request.get_all('kind'),\n run_as_a_service=self.request.get('run_as_a_service', False),\n queue=self.request.get('queue'),\n@@ -1193,11 +1190,12 @@ class DoBackupImportHandler(BaseDoHandler):\n Import is executed and user is redirected to the base-path handler.\n \"\"\"\n gs_handle = self.request.get('gs_handle')\n+ account_id = self.request.get('service_account_name', None)\n token = self.request.get('xsrf_token')\n error = None\n if gs_handle and utils.ValidateXsrfToken(token, XSRF_ACTION):\n try:\n- backup_id = _import_backup(gs_handle)\n+ backup_id = _import_backup(gs_handle, account_id)\n except Exception, e:\n logging.exception('Failed to Import datastore backup information.')\n error = e.message\n@@ -1972,11 +1970,11 @@ def validate_gcs_bucket_name(bucket_name):\n raise BackupValidationError('Invalid bucket name \"%s\"' % bucket_name)\n \n \n-def is_accessible_bucket_name(bucket_name):\n+def is_accessible_bucket_name(bucket_name, account_id=None):\n \"\"\"Returns True if the application has access to the specified bucket.\"\"\"\n scope = config.GoogleApiScope('devstorage.read_write')\n bucket_url = config.GsBucketURL(bucket_name)\n- 
auth_token, _ = app_identity.get_access_token(scope)\n+ auth_token, _ = app_identity.get_access_token(scope, account_id)\n result = urlfetch.fetch(bucket_url, method=urlfetch.HEAD, headers={\n 'Authorization': 'OAuth %s' % auth_token,\n 'x-goog-api-version': '2'})\n@@ -2027,10 +2025,10 @@ def verify_bucket_writable(bucket_name, account_id=None):\n logging.warn('Failed to delete test file %s', file_name)\n \n \n-def is_readable_gs_handle(gs_handle):\n+def is_readable_gs_handle(gs_handle, account_id=None):\n \"\"\"Return True if the application can read the specified gs_handle.\"\"\"\n try:\n- with GCSUtil.open(gs_handle) as bak_file:\n+ with GCSUtil.open(gs_handle, _account_id=account_id) as bak_file:\n bak_file.read(1)\n except (cloudstorage.ForbiddenError,\n cloudstorage.NotFoundError,\n@@ -2060,7 +2058,7 @@ def validate_and_split_gcs_path(gcs_path, account_id=None):\n return bucket_name, path\n \n \n-def list_bucket_files(bucket_name, prefix, max_keys=1000):\n+def list_bucket_files(bucket_name, prefix, max_keys=1000, account_id=None):\n \"\"\"Returns a listing of of a bucket that matches the given prefix.\"\"\"\n scope = config.GoogleApiScope('devstorage.read_only')\n bucket_url = config.GsBucketURL(bucket_name)\n@@ -2069,7 +2067,7 @@ def list_bucket_files(bucket_name, prefix, max_keys=1000):\n if prefix:\n query.append(('prefix', prefix))\n url += urllib.urlencode(query)\n- auth_token, _ = app_identity.get_access_token(scope)\n+ auth_token, _ = app_identity.get_access_token(scope, account_id)\n result = urlfetch.fetch(url, method=urlfetch.GET, headers={\n 'Authorization': 'OAuth %s' % auth_token,\n 'x-goog-api-version': '2'})\n@@ -2079,12 +2077,12 @@ def list_bucket_files(bucket_name, prefix, max_keys=1000):\n raise BackupValidationError('Request to Google Cloud Storage failed')\n \n \n-def get_gs_object(bucket_name, path):\n+def get_gs_object(bucket_name, path, account_id=None):\n \"\"\"Returns a listing of of a bucket that matches the given prefix.\"\"\"\n scope = config.GoogleApiScope('devstorage.read_only')\n bucket_url = config.GsBucketURL(bucket_name)\n url = bucket_url + path\n- auth_token, _ = app_identity.get_access_token(scope)\n+ auth_token, _ = app_identity.get_access_token(scope, account_id)\n result = urlfetch.fetch(url, method=urlfetch.GET, headers={\n 'Authorization': 'OAuth %s' % auth_token,\n 'x-goog-api-version': '2'})\n"
},
{
"change_type": "MODIFY",
"old_path": "AppServer/google/appengine/ext/datastore_admin/main.py",
"new_path": "AppServer/google/appengine/ext/datastore_admin/main.py",
"diff": "@@ -182,7 +182,8 @@ class RouteByActionHandler(webapp.RequestHandler):\n 'active_operations': self.GetOperations(active=True),\n 'pending_backups': self.GetPendingBackups(),\n 'backups': self.GetBackups(),\n- 'map_reduce_path': config.MAPREDUCE_PATH + '/detail'\n+ 'map_reduce_path': config.MAPREDUCE_PATH + '/detail',\n+ 'service_accounts': utils.get_service_account_names()\n }\n utils.RenderToResponse(self, 'list_actions.html', template_params)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_backup_import.html",
"new_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_backup_import.html",
"diff": "@@ -33,6 +33,7 @@\n {% if run_as_a_service %}\n <input type=\"hidden\" name=\"run_as_a_service\" value=\"{{ run_as_a_service|escape }}\">\n {% endif %}\n+ <input type=\"hidden\" name=\"service_account_name\" value=\"{{ service_account_name }}\">\n <input type=\"Submit\" name=\"Import\" value=\"Add to backup list\">\n <input type=\"Submit\" name=\"Restore\" value=\"Restore from backup\">\n <a href=\"{{ datastore_admin_home }}\">Cancel</a>\n"
},
{
"change_type": "MODIFY",
"old_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_restore_from_backup.html",
"new_path": "AppServer/google/appengine/ext/datastore_admin/templates/confirm_restore_from_backup.html",
"diff": "@@ -64,6 +64,19 @@\n <input type=\"text\" name=\"queue\" value=\"default\"/>\n {% endif %}\n </p>\n+ <p>\n+ <label for=\"service_account_name\">\n+ Service account name\n+ <img class=\"ae-help-icon\" src=\"{{ base_path }}/static/img/help.gif\" height=\"14\" width=\"14\" alt=\"help\"\n+ title=\"You can define additional service accounts using the dashboard.\">\n+ </label>\n+ <select id=\"service_account_name\" name=\"service_account_name\">\n+ <option value=\"\">--Choose an option--</option>\n+ {% for account_name in service_accounts %}\n+ <option value=\"{{ account_name }}\">{{ account_name }}</option>\n+ {% endfor %}\n+ </select>\n+ </p>\n <input type=\"submit\" value=\"Restore\">\n <a href=\"{{ datastore_admin_home }}\">Cancel</a>\n </form>\n"
},
{
"change_type": "MODIFY",
"old_path": "AppServer/google/appengine/ext/datastore_admin/templates/list_actions.html",
"new_path": "AppServer/google/appengine/ext/datastore_admin/templates/list_actions.html",
"diff": "@@ -240,8 +240,22 @@\n {% if run_as_a_service %}\n <input type=\"hidden\" name=\"run_as_a_service\" value=\"{{ run_as_a_service|escape }}\">\n {% endif %}\n+ <div style=\"display: block\">\n+ <label for=\"service_account_name\">\n+ Service account name\n+ <img class=\"ae-help-icon\" src=\"{{ base_path }}/static/img/help.gif\" height=\"14\" width=\"14\" alt=\"help\"\n+ title=\"You can define additional service accounts using the dashboard.\">\n+ </label>\n+ <select id=\"service_account_name\" name=\"service_account_name\">\n+ <option value=\"\">--Choose an option--</option>\n+ {% for account_name in service_accounts %}\n+ <option value=\"{{ account_name }}\">{{ account_name }}</option>\n+ {% endfor %}\n+ </select>\n+ </div>\n+ <input type=\"text\" name=\"gs_handle\" value=\"\" size=\"50\" placeholder=\"Bucket or info file\"\n+ title=\"Google Cloud Storage path of a backup info file or a bucket name\"/>\n <input type=\"submit\" name=\"action\" value=\"Import Backup Information\" title=\"Import Backup Information or restore from Google Cloud Storage\"/>\n- <input type=\"text\" name=\"gs_handle\" value=\"\" size=\"50\" title=\"Google Cloud Storage path of a backup info file or a bucket name\"/>\n </form>\n </div>\n {% if active_operations %}\n"
},
{
"change_type": "MODIFY",
"old_path": "AppServer/google/appengine/ext/datastore_admin/utils.py",
"new_path": "AppServer/google/appengine/ext/datastore_admin/utils.py",
"diff": "@@ -24,6 +24,7 @@\n \n import base64\n import datetime\n+import json\n import logging\n import os\n import random\n@@ -36,9 +37,11 @@ from google.appengine.ext.mapreduce import model\n from google.appengine.ext.mapreduce import operation as mr_operation\n from google.appengine.ext.mapreduce import util\n \n+from google.appengine.api import app_identity\n from google.appengine.api import datastore\n from google.appengine.api import datastore_errors\n from google.appengine.api import memcache\n+from google.appengine.api import urlfetch\n from google.appengine.api import users\n from google.appengine.datastore import datastore_rpc\n from google.appengine.datastore import entity_pb\n@@ -859,3 +862,21 @@ def GetKindsForCurrentNamespace(deadline):\n more_kinds = True\n logging.warning('Failed to retrieve all kinds within deadline.')\n return kind_names, more_kinds\n+\n+\n+def get_service_account_names():\n+ \"\"\" AppScale: Fetch list of service accounts from IAM API. \"\"\"\n+ project_id = app_identity.get_application_id()\n+ iam_location = 'https://127.0.0.1:17441'\n+ url = iam_location + '/v1/projects/{}/serviceAccounts'.format(project_id)\n+ token = app_identity.get_access_token(\n+ ['https://www.googleapis.com/auth/cloud-platform'])[0]\n+ headers = {'Authorization': 'Bearer {}'.format(token)}\n+ response = urlfetch.fetch(url, headers=headers, validate_certificate=False)\n+ try:\n+ accounts = json.loads(response.content)['accounts']\n+ except (KeyError, ValueError):\n+ raise ValueError('Invalid list of service accounts: '\n+ '{}'.format(response.content))\n+\n+ return tuple(account['email'] for account in accounts)\n"
}
] |
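The recurring pattern in the backup_handler.py changes above is to thread an optional account id down to `app_identity.get_access_token()` so Cloud Storage requests are authorized as that account. A minimal sketch of that pattern, loosely mirroring `is_accessible_bucket_name`; it only runs inside the first-generation App Engine runtime, and the scope literal and bucket URL here are illustrative (the real code builds them via `config.GoogleApiScope` and `config.GsBucketURL`).

```python
from google.appengine.api import app_identity, urlfetch


def bucket_is_accessible(bucket_name, account_id=None):
    """Return True if the (optionally custom) service account can reach the bucket."""
    scope = 'https://www.googleapis.com/auth/devstorage.read_write'
    # account_id=None keeps the old behaviour: the app's default identity is used.
    auth_token, _ = app_identity.get_access_token(scope, account_id)
    result = urlfetch.fetch(
        'https://storage.googleapis.com/' + bucket_name,
        method=urlfetch.HEAD,
        headers={'Authorization': 'OAuth %s' % auth_token,
                 'x-goog-api-version': '2'})
    return result.status_code == 200
```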
1cce41180f3cf7c2fbae686f5585a03363e4e1b9 | appscale/gts | 15.10.2019 14:31:12 | Apache License 2.0 | Add support for property metadata queries
This allows clients to request a list of property names and types
for all kinds in a given namespace. | [
{
"change_type": "MODIFY",
"old_path": "AppDB/appscale/datastore/fdb/codecs.py",
"new_path": "AppDB/appscale/datastore/fdb/codecs.py",
"diff": "@@ -33,6 +33,10 @@ POINT_CODE = 0x21\n USER_CODE = 0x24\n REFERENCE_CODE = 0x27\n \n+# These are defined for clarity when selecting property type limits.\n+MIN_INT64_CODE = INT64_ZERO_CODE - 8\n+MAX_INT64_CODE = INT64_ZERO_CODE + 8\n+\n # Ensures the shorter of two variable-length values (with identical prefixes)\n # is placed before the longer one. Otherwise, the following byte(s) could\n # determine the sort order. It also allows a decoder to find the end of the\n"
},
{
"change_type": "MODIFY",
"old_path": "AppDB/appscale/datastore/fdb/data.py",
"new_path": "AppDB/appscale/datastore/fdb/data.py",
"diff": "@@ -438,6 +438,14 @@ class DataManager(object):\n index_entry.project_id, index_entry.namespace, index_entry.path,\n encoded_entity=entity.Encode())\n raise gen.Return(version_entry)\n+ elif index_entry.kind == u'__property__':\n+ entity = index_entry.prop_result()\n+ entity.clear_entity_group()\n+ entity.mutable_entity_group()\n+ version_entry = VersionEntry(\n+ index_entry.project_id, index_entry.namespace, index_entry.path,\n+ encoded_entity=entity.Encode())\n+ raise gen.Return(version_entry)\n \n version_entry = yield self.get_version_from_path(\n tr, index_entry.project_id, index_entry.namespace, index_entry.path,\n"
},
{
"change_type": "MODIFY",
"old_path": "AppDB/appscale/datastore/fdb/indexes.py",
"new_path": "AppDB/appscale/datastore/fdb/indexes.py",
"diff": "@@ -14,6 +14,7 @@ import six\n from tornado import gen\n \n from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER\n+from appscale.datastore.fdb import codecs\n from appscale.datastore.fdb.codecs import (\n decode_str, decode_value, encode_value, encode_versionstamp_index, Path)\n from appscale.datastore.fdb.sdk import FindIndexToUse, ListCursor\n@@ -410,6 +411,76 @@ class KindIterator(object):\n raise gen.Return(False)\n \n \n+class PropertyIterator(object):\n+ \"\"\" Iterates over a list of indexed property names for a kind. \"\"\"\n+ PROPERTY_TYPES = (u'NULL', u'INT64', u'BOOLEAN', u'STRING', u'DOUBLE',\n+ u'POINT', u'USER', u'REFERENCE')\n+\n+ def __init__(self, tr, tornado_fdb, project_dir, namespace):\n+ self._tr = tr\n+ self._tornado_fdb = tornado_fdb\n+ self._project_dir = project_dir\n+ self._namespace = namespace\n+ self._done = False\n+\n+ @gen.coroutine\n+ def next_page(self):\n+ if self._done:\n+ raise gen.Return(([], False))\n+\n+ # TODO: This can be made async.\n+ ns_dir = self._project_dir.open(\n+ self._tr, (SinglePropIndex.DIR_NAME, self._namespace))\n+ kinds = ns_dir.list(self._tr)\n+ # TODO: Check if stat entities belong in kinds.\n+ kind_dirs = [ns_dir.open(self._tr, (kind,)) for kind in kinds]\n+ results = []\n+ for kind, kind_dir in zip(kinds, kind_dirs):\n+ # TODO: This can be made async.\n+ prop_names = kind_dir.list(self._tr)\n+ for prop_name in prop_names:\n+ prop_dir = kind_dir.open(self._tr, (prop_name,))\n+ index = SinglePropIndex(prop_dir)\n+ populated_map = yield [self._populated(index, type_name)\n+ for type_name in self.PROPERTY_TYPES]\n+ populated_types = tuple(\n+ type_ for type_, populated in zip(self.PROPERTY_TYPES, populated_map)\n+ if populated)\n+ if not populated_types:\n+ continue\n+\n+ project_id = self._project_dir.get_path()[-1]\n+ path = (u'__kind__', kind, u'__property__', prop_name)\n+ properties = []\n+ for prop_type in populated_types:\n+ prop_value = entity_pb.PropertyValue()\n+ prop_value.set_stringvalue(prop_type)\n+ properties.append((u'property_representation', prop_value))\n+\n+ results.append(CompositeEntry(project_id, self._namespace, path,\n+ properties, None, None))\n+\n+ self._done = True\n+ raise gen.Return((results, False))\n+\n+ @gen.coroutine\n+ def _populated(self, prop_index, type_name):\n+ \"\"\" Checks if at least one entity exists for a given type name. \"\"\"\n+ index_slice = prop_index.type_range(type_name)\n+ # This query is reversed to increase the likelihood of getting a relevant\n+ # (not marked for GC) entry.\n+ iterator = IndexIterator(self._tr, self._tornado_fdb, prop_index,\n+ index_slice, fetch_limit=1, reverse=True,\n+ snapshot=True)\n+ while True:\n+ results, more_results = yield iterator.next_page()\n+ if results:\n+ raise gen.Return(True)\n+\n+ if not more_results:\n+ raise gen.Return(False)\n+\n+\n class MergeJoinIterator(object):\n \"\"\"\n Returns pages of index entry results from multiple ranges. It ignores\n@@ -949,6 +1020,38 @@ class SinglePropIndex(Index):\n return PropertyEntry(self.project_id, self.namespace, path, self.prop_name,\n value, commit_versionstamp, deleted_versionstamp)\n \n+ def type_range(self, type_name):\n+ \"\"\" Returns a slice that encompasses all values for a property type. 
\"\"\"\n+ if type_name == u'NULL':\n+ start = six.int2byte(codecs.NULL_CODE)\n+ stop = six.int2byte(codecs.NULL_CODE + 1)\n+ elif type_name == u'INT64':\n+ start = six.int2byte(codecs.MIN_INT64_CODE)\n+ stop = six.int2byte(codecs.MAX_INT64_CODE + 1)\n+ elif type_name == u'BOOLEAN':\n+ start = six.int2byte(codecs.FALSE_CODE)\n+ stop = six.int2byte(codecs.TRUE_CODE + 1)\n+ elif type_name == u'STRING':\n+ start = six.int2byte(codecs.BYTES_CODE)\n+ stop = six.int2byte(codecs.BYTES_CODE + 1)\n+ elif type_name == u'DOUBLE':\n+ start = six.int2byte(codecs.DOUBLE_CODE)\n+ stop = six.int2byte(codecs.DOUBLE_CODE + 1)\n+ elif type_name == u'POINT':\n+ start = six.int2byte(codecs.POINT_CODE)\n+ stop = six.int2byte(codecs.POINT_CODE + 1)\n+ elif type_name == u'USER':\n+ start = six.int2byte(codecs.USER_CODE)\n+ stop = six.int2byte(codecs.USER_CODE + 1)\n+ elif type_name == u'REFERENCE':\n+ start = six.int2byte(codecs.REFERENCE_CODE)\n+ stop = six.int2byte(codecs.REFERENCE_CODE + 1)\n+ else:\n+ raise InternalError(u'Unknown type name')\n+\n+ return slice(self.directory.rawPrefix + start,\n+ self.directory.rawPrefix + stop)\n+\n \n class CompositeIndex(Index):\n \"\"\"\n@@ -1171,6 +1274,10 @@ class IndexManager(object):\n project_dir = yield self._directory_cache.get(tr, (project_id,))\n raise gen.Return(KindIterator(tr, self._tornado_fdb, project_dir,\n namespace))\n+ elif query.has_kind() and query.kind() == u'__property__':\n+ project_dir = yield self._directory_cache.get(tr, (project_id,))\n+ raise gen.Return(PropertyIterator(tr, self._tornado_fdb, project_dir,\n+ namespace))\n \n index = yield self._get_perfect_index(tr, query)\n reverse = get_scan_direction(query, index) == Query_Order.DESCENDING\n"
}
] |
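With this change a client can issue the same kind of metadata query Google Cloud Datastore supports: querying the reserved `__property__` kind yields one entity per indexed property, keyed under its `__kind__` parent, with the value types listed in `property_representation` (both names appear in the diff). A rough client-side sketch using the low-level datastore API; the helper name is made up and the code assumes the App Engine runtime.

```python
from google.appengine.api import datastore


def list_indexed_properties(namespace=''):
    """Print 'Kind.prop: TYPE, ...' for each indexed property in the namespace."""
    query = datastore.Query('__property__', namespace=namespace)
    for prop_entity in query.Run():
        kind = prop_entity.key().parent().name()   # parent key is the __kind__ entity
        prop_name = prop_entity.key().name()
        types = prop_entity.get('property_representation', [])
        if not isinstance(types, list):            # a single type comes back as a scalar
            types = [types]
        print('%s.%s: %s' % (kind, prop_name, ', '.join(types)))
```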
444e7026771cce4ece0feff9f5f3c76f57c61ed3 | xaynetwork/xaynet | 19.09.2019 17:41:41 | Apache License 2.0 | Add subdirectories for aggregates
To enable multiple aggregates in a clean way, the output directory will now contain a sub-directory for each group for which the aggregate function is called. | [
{
"change_type": "MODIFY",
"old_path": "xain/benchmark/aggregation/final_task_accuracies.py",
"new_path": "xain/benchmark/aggregation/final_task_accuracies.py",
"diff": "@@ -3,6 +3,7 @@ from typing import Dict, List, Optional, Tuple\n \n from absl import app, flags, logging\n \n+from xain.helpers import storage\n from xain.types import PlotValues, XticksLabels, XticksLocations\n \n from .plot import plot\n@@ -102,7 +103,8 @@ def aggregate() -> str:\n :returns: Absolut path to saved plot\n \"\"\"\n group_name = FLAGS.group_name\n- fname = f\"plot_final_task_accuracies_{group_name}.png\"\n+ dname = storage.create_output_subdir(group_name)\n+ fname = storage.fname_with_default_dir(\"plot_final_task_accuracies.png\", dname)\n \n (data, xticks_args) = prepare_aggregation_data(group_name)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "xain/benchmark/aggregation/final_task_accuracies_test.py",
"new_path": "xain/benchmark/aggregation/final_task_accuracies_test.py",
"diff": "@@ -66,8 +66,8 @@ def test_plot_final_task_accuracies(output_dir, group_name, monkeypatch):\n range(1, 12, 1),\n ),\n ]\n- fname = f\"plot_final_task_accuracies_{group_name}.png\"\n- expected_filepath = os.path.join(output_dir, fname)\n+ fname = f\"plot_final_task_accuracies.png\"\n+ expected_filepath = os.path.join(output_dir, group_name, fname)\n expected_sha1 = \"19cbae25328694a436842de89acbbf661020b4cf\"\n \n xticks_locations = range(1, 12, 1)\n"
},
{
"change_type": "MODIFY",
"old_path": "xain/benchmark/aggregation/plot.py",
"new_path": "xain/benchmark/aggregation/plot.py",
"diff": "@@ -43,7 +43,7 @@ def plot(\n \n # if fname is an absolute path use fname directly otherwise assume\n # fname is filename and prepend output_dir\n- fname_abspath = storage.get_abspath(fname, FLAGS.output_dir)\n+ fname_abspath = storage.fname_with_default_dir(fname, FLAGS.output_dir)\n \n plt.figure()\n plt.ylim(0.0, ylim_max)\n"
},
{
"change_type": "MODIFY",
"old_path": "xain/benchmark/aggregation/task_accuracies.py",
"new_path": "xain/benchmark/aggregation/task_accuracies.py",
"diff": "@@ -3,6 +3,7 @@ from typing import List, Tuple\n \n from absl import app, flags, logging\n \n+from xain.helpers import storage\n from xain.types import PlotValues\n \n from .plot import plot\n@@ -85,7 +86,8 @@ def aggregate() -> str:\n :returns: Absolut path to saved plot\n \"\"\"\n group_name = FLAGS.group_name\n- fname = f\"plot_task_accuracies_{group_name}.png\"\n+ dname = storage.create_output_subdir(group_name)\n+ fname = storage.fname_with_default_dir(\"plot_task_accuracies.png\", dname)\n \n data = prepare_aggregation_data(group_name)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "xain/benchmark/aggregation/task_accuracies_test.py",
"new_path": "xain/benchmark/aggregation/task_accuracies_test.py",
"diff": "@@ -22,8 +22,8 @@ def test_plot_task_accuracies(output_dir, group_name, monkeypatch):\n range(1, 12, 1),\n ),\n ]\n- fname = f\"plot_task_accuracies_{group_name}.png\"\n- expected_filepath = os.path.join(output_dir, fname)\n+ fname = f\"plot_task_accuracies.png\"\n+ expected_filepath = os.path.join(output_dir, group_name, fname)\n expected_sha1 = \"7138bde2b95eedda6b05b665cc35a6cf204e35e1\"\n \n def mock_prepare_aggregation_data(_: str):\n"
},
{
"change_type": "MODIFY",
"old_path": "xain/generator/partition_volume_distributions.py",
"new_path": "xain/generator/partition_volume_distributions.py",
"diff": "@@ -130,16 +130,16 @@ def plot_fashion_mnist_dist():\n plt.plot(xs, np.array(dist))\n plt.legend(legend, loc=\"upper left\")\n \n- fname_abspath = storage.get_abspath(\n- \"plot_fashion_mnist_partition_volume_dist\", FLAGS.output_dir\n- )\n- plt.savefig(fname=fname_abspath, format=FORMAT)\n+ dname = storage.create_output_subdir(\"partition_volume_distributions\")\n+ fname = storage.fname_with_default_dir(\"plot_fashion_mnist\", dname)\n+\n+ plt.savefig(fname=fname, format=FORMAT)\n \n # FIXME: Matplotlib is currently using agg, which is a non-GUI\n # backend, so cannot show the figure.\n # plt.show()\n \n- return fname_abspath\n+ return fname\n \n \n def main():\n"
},
{
"change_type": "MODIFY",
"old_path": "xain/helpers/storage.py",
"new_path": "xain/helpers/storage.py",
"diff": "@@ -29,8 +29,19 @@ def listdir_recursive(dname: str, relpath=True):\n return files\n \n \n-def get_abspath(fname: str, dname: str = None) -> str:\n+def create_output_subdir(dname: str) -> str:\n+ if os.path.isabs(dname):\n+ raise Exception(\"Please provide a relative directory name\")\n \n+ dname = os.path.join(FLAGS.output_dir, dname)\n+\n+ os.makedirs(dname, exist_ok=True)\n+\n+ return dname\n+\n+\n+def fname_with_default_dir(fname: str, dname: str = None) -> str:\n+ \"\"\"Returns fname if its a absolute path otherwise joins it with dname\"\"\"\n if os.path.isabs(fname):\n return fname\n \n@@ -41,12 +52,12 @@ def get_abspath(fname: str, dname: str = None) -> str:\n \n \n def write_json(results: Dict, fname: str):\n- fname = get_abspath(fname, FLAGS.output_dir)\n+ fname = fname_with_default_dir(fname, FLAGS.output_dir)\n with open(fname, \"w\") as outfile:\n json.dump(results, outfile, indent=2, sort_keys=True)\n \n \n def read_json(fname: str):\n- fname = get_abspath(fname, FLAGS.output_dir)\n+ fname = fname_with_default_dir(fname, FLAGS.output_dir)\n with open(fname, \"r\") as outfile:\n return json.loads(outfile.read())\n"
},
{
"change_type": "MODIFY",
"old_path": "xain/helpers/storage_test.py",
"new_path": "xain/helpers/storage_test.py",
"diff": "@@ -3,25 +3,25 @@ import os\n from . import storage\n \n \n-def test_get_abspath_fname_with_absolute_path():\n+def test_fname_with_default_dir_absolute_path():\n # Prepare\n fname = \"/my/absolute/path/myfile\"\n expected_abspath = fname\n \n # Execute\n- actual_abspath = storage.get_abspath(fname)\n+ actual_abspath = storage.fname_with_default_dir(fname)\n \n # Assert\n assert expected_abspath == actual_abspath\n \n \n-def test_get_abspath_fname_only_filename(output_dir):\n+def test_fname_with_default_dir_relative_path(output_dir):\n # Prepare\n fname = \"myfile\"\n expected_abspath = os.path.join(output_dir, fname)\n \n # Execute\n- actual_abspath = storage.get_abspath(fname, output_dir)\n+ actual_abspath = storage.fname_with_default_dir(fname, output_dir)\n \n # Assert\n assert expected_abspath == actual_abspath\n"
}
] |
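The net effect of the storage changes above is that aggregation output moves from `<output_dir>/plot_task_accuracies_<group>.png` to `<output_dir>/<group>/plot_task_accuracies.png`. A short sketch of how the renamed and new helpers compose; the wrapper function below is illustrative and not part of the diff.

```python
from xain.helpers import storage


def plot_output_path(group_name):
    # Creates <FLAGS.output_dir>/<group_name>/ if needed and returns its path;
    # absolute group names are rejected by create_output_subdir.
    dname = storage.create_output_subdir(group_name)
    # Absolute file names are returned unchanged; relative ones are joined
    # with the sub-directory (this replaces the old get_abspath helper).
    return storage.fname_with_default_dir("plot_task_accuracies.png", dname)
```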
7c1a73370bd6ffb091dbc7cb811ee447f6e176aa | armmbed/mbed-crypto | 05.03.2019 16:25:38 | Apache License 2.0 | Add RepoVersion class to make handling of many arguments easier
There are a number of arguments being passed around, nearly all of
which are duplicated between the old and new versions. Moving these
into a separate class should hopefully make it simpler to follow
what is being done. | [
{
"change_type": "MODIFY",
"old_path": "scripts/abi_check.py",
"new_path": "scripts/abi_check.py",
"diff": "@@ -28,23 +28,37 @@ import fnmatch\n import xml.etree.ElementTree as ET\n \n \n+class RepoVersion(object):\n+\n+ def __init__(self, version, repository, revision,\n+ crypto_repository, crypto_revision):\n+ \"\"\"Class containing details for a particular revision.\n+\n+ version: either 'old' or 'new'\n+ repository: repository for git revision\n+ revision: git revision for comparison\n+ crypto_repository: repository for git revision of crypto submodule\n+ crypto_revision: git revision of crypto submodule\n+ \"\"\"\n+ self.version = version\n+ self.repository = repository\n+ self.revision = revision\n+ self.crypto_repository = crypto_repository\n+ self.crypto_revision = crypto_revision\n+ self.abi_dumps = {}\n+ self.modules = {}\n+\n+\n class AbiChecker(object):\n \"\"\"API and ABI checker.\"\"\"\n \n- def __init__(self, report_dir, old_repo, old_rev, old_crypto_rev,\n- old_crypto_repo, new_repo, new_rev, new_crypto_rev,\n- new_crypto_repo, keep_all_reports, brief, skip_file=None):\n+ def __init__(self, old_version, new_version, report_dir,\n+ keep_all_reports, brief, skip_file=None):\n \"\"\"Instantiate the API/ABI checker.\n \n+ old_version: RepoVersion containing details to compare against\n+ new_version: RepoVersion containing details to check\n report_dir: directory for output files\n- old_repo: repository for git revision to compare against\n- old_rev: reference git revision to compare against\n- old_crypto_rev: reference git revision for old crypto submodule\n- old_crypto_repo: repository for git revision for old crypto submodule\n- new_repo: repository for git revision to check\n- new_rev: git revision to check\n- new_crypto_rev: reference git revision for new crypto submodule\n- new_crypto_repo: repository for git revision for new crypto submodule\n keep_all_reports: if false, delete old reports\n brief: if true, output shorter report to stdout\n skip_file: path to file containing symbols and types to skip\n@@ -56,19 +70,10 @@ class AbiChecker(object):\n self.keep_all_reports = keep_all_reports\n self.can_remove_report_dir = not (os.path.isdir(self.report_dir) or\n keep_all_reports)\n- self.old_repo = old_repo\n- self.old_rev = old_rev\n- self.old_crypto_rev = old_crypto_rev\n- self.old_crypto_repo = old_crypto_repo\n- self.new_repo = new_repo\n- self.new_rev = new_rev\n- self.new_crypto_rev = new_crypto_rev\n- self.new_crypto_repo = new_crypto_repo\n+ self.old_version = old_version\n+ self.new_version = new_version\n self.skip_file = skip_file\n self.brief = brief\n- self.mbedtls_modules = {\"old\": {}, \"new\": {}}\n- self.old_dumps = {}\n- self.new_dumps = {}\n self.git_command = \"git\"\n self.make_command = \"make\"\n \n@@ -90,18 +95,19 @@ class AbiChecker(object):\n if not shutil.which(command):\n raise Exception(\"{} not installed, aborting\".format(command))\n \n- def get_clean_worktree_for_git_revision(self, remote_repo, git_rev):\n- \"\"\"Make a separate worktree with git_rev checked out.\n+ def get_clean_worktree_for_git_revision(self, version):\n+ \"\"\"Make a separate worktree with version.revision checked out.\n Do not modify the current worktree.\"\"\"\n git_worktree_path = tempfile.mkdtemp()\n- if remote_repo:\n+ if version.repository:\n self.log.info(\n \"Checking out git worktree for revision {} from {}\".format(\n- git_rev, remote_repo\n+ version.revision, version.repository\n )\n )\n fetch_process = subprocess.Popen(\n- [self.git_command, \"fetch\", remote_repo, git_rev],\n+ [self.git_command, \"fetch\",\n+ version.repository, version.revision],\n 
cwd=self.repo_path,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n@@ -112,10 +118,10 @@ class AbiChecker(object):\n raise Exception(\"Fetching revision failed, aborting\")\n worktree_rev = \"FETCH_HEAD\"\n else:\n- self.log.info(\n- \"Checking out git worktree for revision {}\".format(git_rev)\n- )\n- worktree_rev = git_rev\n+ self.log.info(\"Checking out git worktree for revision {}\".format(\n+ version.revision\n+ ))\n+ worktree_rev = version.revision\n worktree_process = subprocess.Popen(\n [self.git_command, \"worktree\", \"add\", \"--detach\",\n git_worktree_path, worktree_rev],\n@@ -129,8 +135,7 @@ class AbiChecker(object):\n raise Exception(\"Checking out worktree failed, aborting\")\n return git_worktree_path\n \n- def update_git_submodules(self, git_worktree_path, crypto_repo,\n- crypto_rev):\n+ def update_git_submodules(self, git_worktree_path, version):\n process = subprocess.Popen(\n [self.git_command, \"submodule\", \"update\", \"--init\", '--recursive'],\n cwd=git_worktree_path,\n@@ -142,14 +147,14 @@ class AbiChecker(object):\n if process.returncode != 0:\n raise Exception(\"git submodule update failed, aborting\")\n if not (os.path.exists(os.path.join(git_worktree_path, \"crypto\"))\n- and crypto_rev):\n+ and version.crypto_revision):\n return\n \n- if crypto_repo:\n+ if version.crypto_repository:\n shutil.rmtree(os.path.join(git_worktree_path, \"crypto\"))\n clone_process = subprocess.Popen(\n- [self.git_command, \"clone\", crypto_repo,\n- \"--branch\", crypto_rev, \"crypto\"],\n+ [self.git_command, \"clone\", version.crypto_repository,\n+ \"--branch\", version.crypto_revision, \"crypto\"],\n cwd=git_worktree_path,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n@@ -160,7 +165,7 @@ class AbiChecker(object):\n raise Exception(\"git clone failed, aborting\")\n else:\n checkout_process = subprocess.Popen(\n- [self.git_command, \"checkout\", crypto_rev],\n+ [self.git_command, \"checkout\", version.crypto_revision],\n cwd=os.path.join(git_worktree_path, \"crypto\"),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n@@ -187,29 +192,28 @@ class AbiChecker(object):\n self.log.info(make_output.decode(\"utf-8\"))\n for root, dirs, files in os.walk(git_worktree_path):\n for file in fnmatch.filter(files, \"*.so\"):\n- self.mbedtls_modules[version][os.path.splitext(file)[0]] = (\n+ version.modules[os.path.splitext(file)[0]] = (\n os.path.join(root, file)\n )\n if make_process.returncode != 0:\n raise Exception(\"make failed, aborting\")\n \n- def get_abi_dumps_from_shared_libraries(self, git_ref, git_worktree_path,\n+ def get_abi_dumps_from_shared_libraries(self, git_worktree_path,\n version):\n \"\"\"Generate the ABI dumps for the specified git revision.\n It must be checked out in git_worktree_path and the shared libraries\n must have been built.\"\"\"\n- abi_dumps = {}\n- for mbed_module, module_path in self.mbedtls_modules[version].items():\n+ for mbed_module, module_path in version.modules.items():\n output_path = os.path.join(\n- self.report_dir, version, \"{}-{}.dump\".format(\n- mbed_module, git_ref\n+ self.report_dir, version.version, \"{}-{}.dump\".format(\n+ mbed_module, version.revision\n )\n )\n abi_dump_command = [\n \"abi-dumper\",\n module_path,\n \"-o\", output_path,\n- \"-lver\", git_ref\n+ \"-lver\", version.revision\n ]\n abi_dump_process = subprocess.Popen(\n abi_dump_command,\n@@ -220,8 +224,7 @@ class AbiChecker(object):\n self.log.info(abi_dump_output.decode(\"utf-8\"))\n if abi_dump_process.returncode != 0:\n raise Exception(\"abi-dumper 
failed, aborting\")\n- abi_dumps[mbed_module] = output_path\n- return abi_dumps\n+ version.abi_dumps[mbed_module] = output_path\n \n def cleanup_worktree(self, git_worktree_path):\n \"\"\"Remove the specified git worktree.\"\"\"\n@@ -237,19 +240,13 @@ class AbiChecker(object):\n if worktree_process.returncode != 0:\n raise Exception(\"Worktree cleanup failed, aborting\")\n \n- def get_abi_dump_for_ref(self, remote_repo, git_rev, crypto_repo,\n- crypto_rev, version):\n+ def get_abi_dump_for_ref(self, version):\n \"\"\"Generate the ABI dumps for the specified git revision.\"\"\"\n- git_worktree_path = self.get_clean_worktree_for_git_revision(\n- remote_repo, git_rev\n- )\n- self.update_git_submodules(git_worktree_path, crypto_repo, crypto_rev)\n+ git_worktree_path = self.get_clean_worktree_for_git_revision(version)\n+ self.update_git_submodules(git_worktree_path, version)\n self.build_shared_libraries(git_worktree_path, version)\n- abi_dumps = self.get_abi_dumps_from_shared_libraries(\n- git_rev, git_worktree_path, version\n- )\n+ self.get_abi_dumps_from_shared_libraries(git_worktree_path, version)\n self.cleanup_worktree(git_worktree_path)\n- return abi_dumps\n \n def remove_children_with_tag(self, parent, tag):\n children = parent.getchildren()\n@@ -275,19 +272,20 @@ class AbiChecker(object):\n be available.\"\"\"\n compatibility_report = \"\"\n compliance_return_code = 0\n- shared_modules = list(set(self.mbedtls_modules[\"old\"].keys()) &\n- set(self.mbedtls_modules[\"new\"].keys()))\n+ shared_modules = list(set(self.old_version.modules.keys()) &\n+ set(self.new_version.modules.keys()))\n for mbed_module in shared_modules:\n output_path = os.path.join(\n self.report_dir, \"{}-{}-{}.html\".format(\n- mbed_module, self.old_rev, self.new_rev\n+ mbed_module, self.old_version.revision,\n+ self.new_version.revision\n )\n )\n abi_compliance_command = [\n \"abi-compliance-checker\",\n \"-l\", mbed_module,\n- \"-old\", self.old_dumps[mbed_module],\n- \"-new\", self.new_dumps[mbed_module],\n+ \"-old\", self.old_version.abi_dumps[mbed_module],\n+ \"-new\", self.new_version.abi_dumps[mbed_module],\n \"-strict\",\n \"-report-path\", output_path,\n ]\n@@ -329,8 +327,8 @@ class AbiChecker(object):\n \"abi-compliance-checker failed with a return code of {},\"\n \" aborting\".format(abi_compliance_process.returncode)\n )\n- os.remove(self.old_dumps[mbed_module])\n- os.remove(self.new_dumps[mbed_module])\n+ os.remove(self.old_version.abi_dumps[mbed_module])\n+ os.remove(self.new_version.abi_dumps[mbed_module])\n if self.can_remove_report_dir:\n os.rmdir(self.report_dir)\n self.log.info(compatibility_report)\n@@ -341,12 +339,8 @@ class AbiChecker(object):\n between self.old_rev and self.new_rev.\"\"\"\n self.check_repo_path()\n self.check_abi_tools_are_installed()\n- self.old_dumps = self.get_abi_dump_for_ref(self.old_repo, self.old_rev,\n- self.old_crypto_repo,\n- self.old_crypto_rev, \"old\")\n- self.new_dumps = self.get_abi_dump_for_ref(self.new_repo, self.new_rev,\n- self.new_crypto_repo,\n- self.new_crypto_rev, \"new\")\n+ self.get_abi_dump_for_ref(self.old_version)\n+ self.get_abi_dump_for_ref(self.new_version)\n return self.get_abi_compatibility_report()\n \n \n@@ -412,12 +406,13 @@ def run_main():\n help=\"output only the list of issues to stdout, instead of a full report\",\n )\n abi_args = parser.parse_args()\n+ old_version = RepoVersion(\"old\", abi_args.old_repo, abi_args.old_rev,\n+ abi_args.old_crypto_repo, abi_args.old_crypto_rev)\n+ new_version = RepoVersion(\"new\", abi_args.new_repo, 
abi_args.new_rev,\n+ abi_args.new_crypto_repo, abi_args.new_crypto_rev)\n abi_check = AbiChecker(\n- abi_args.report_dir, abi_args.old_repo, abi_args.old_rev,\n- abi_args.old_crypto_rev, abi_args.old_crypto_repo,\n- abi_args.new_repo, abi_args.new_rev, abi_args.new_crypto_rev,\n- abi_args.new_crypto_repo, abi_args.keep_all_reports,\n- abi_args.brief, abi_args.skip_file\n+ old_version, new_version, abi_args.report_dir,\n+ abi_args.keep_all_reports, abi_args.brief, abi_args.skip_file\n )\n return_code = abi_check.check_for_abi_changes()\n sys.exit(return_code)\n"
}
] |
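After the refactor, callers bundle every per-revision detail into a `RepoVersion` and hand the checker just two of them, as `run_main` now does. A condensed sketch of that call pattern; the revision strings are placeholders and the import path assumes `scripts/abi_check.py` is importable as a module.

```python
from abi_check import AbiChecker, RepoVersion

# Placeholder revisions -- a repository of None means "use the local repo".
old_version = RepoVersion("old", None, "baseline-tag", None, "crypto-baseline-tag")
new_version = RepoVersion("new", None, "HEAD", None, "HEAD")

checker = AbiChecker(old_version, new_version, report_dir="reports",
                     keep_all_reports=False, brief=True, skip_file=None)
return_code = checker.check_for_abi_changes()
```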
a16d89f4b043ea07c6a936646924ac881e2b183b | nextstrain/ncov | 19.02.2021 13:17:57 | MIT License | Improve combine metadata script
1. More detailed help message
2. Helpful errors printed if assertions fail
3. Encoding of metadata origin now uses explicit "yes"/"no" values
rather than using the empty string. | [
{
"change_type": "MODIFY",
"old_path": "scripts/combine_metadata.py",
"new_path": "scripts/combine_metadata.py",
"diff": "@@ -2,6 +2,7 @@ import argparse\n from augur.utils import read_metadata\n from Bio import SeqIO\n import csv\n+import sys\n \n EMPTY = ''\n \n@@ -18,25 +19,33 @@ EMPTY = ''\n \n def parse_args():\n parser = argparse.ArgumentParser(\n- description=\"Custom script to combine metadata files\",\n+ description=\"\"\"\n+ Custom script to combine metadata files from different origins.\n+ In the case where metadata files specify different values, the latter provided file will take priority.\n+ Columns will be added for each origin with values \"yes\" or \"no\" to identify the input source (origin) of each sample.\n+ \"\"\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument('--metadata', required=True, nargs='+', metavar=\"TSV\", help=\"Metadata files\")\n- parser.add_argument('--origins', required=True, nargs='+', metavar=\"STR\", help=\"Names of origins (metadata columns will be created from these)\")\n- parser.add_argument('--output', required=True, metavar=\"TSV\", help=\"output (merged) metadata\")\n+ parser.add_argument('--origins', required=True, nargs='+', metavar=\"STR\", help=\"Names of origins (order should match provided metadata)\")\n+ parser.add_argument('--output', required=True, metavar=\"TSV\", help=\"Output (merged) metadata\")\n args = parser.parse_args()\n return args\n \n if __name__ == '__main__':\n args = parse_args()\n- assert(len(args.metadata)==len(args.origins))\n- assert(len(args.origins)>1)\n+ try:\n+ assert(len(args.metadata)==len(args.origins))\n+ assert(len(args.origins)>1)\n+ except AssertionError:\n+ print(\"Error. Please check your inputs - there must be the same number of metadata files as origins provided, and there must be more than one of each!\")\n+ sys.exit(2)\n \n # READ IN METADATA FILES\n metadata = []\n for (origin, fname) in zip(args.origins, args.metadata):\n data, columns = read_metadata(fname)\n- metadata.append({'origin': origin, \"fname\": fname, 'data': data, 'columns': columns})\n+ metadata.append({'origin': origin, \"fname\": fname, 'data': data, 'columns': columns, 'strains': {s for s in data.keys()}})\n \n # SUMMARISE INPUT METADATA\n print(f\"Parsed {len(metadata)} metadata TSVs\")\n@@ -54,8 +63,8 @@ if __name__ == '__main__':\n for strain in combined_data:\n for column in combined_columns:\n if column not in combined_data[strain]:\n- combined_data[strain][column] = EMPTY\n- combined_data[strain][metadata[0]['origin']] = \"yes\" # can't use `True` as booleans cause issues for `augur filter`\n+ combined_data[strain][column] = EMPTY \n+ \n for idx in range(1, len(metadata)):\n for strain, row in metadata[idx]['data'].items():\n if strain not in combined_data:\n@@ -69,7 +78,13 @@ if __name__ == '__main__':\n if existing_value != EMPTY:\n print(f\"[{strain}::{column}] Overwriting {combined_data[strain][column]} with {new_value}\")\n combined_data[strain][column] = new_value\n- combined_data[strain][metadata[idx]['origin']] = \"yes\"\n+\n+ # one-hot encoding for origin\n+ # note that we use \"yes\" / \"no\" here as Booleans are problematic for `augur filter`\n+ for metadata_entry in metadata:\n+ origin = metadata_entry['origin']\n+ for strain in combined_data:\n+ combined_data[strain][origin] = \"yes\" if strain in metadata_entry['strains'] else \"no\"\n \n print(f\"Combined metadata: {len(combined_data.keys())} strains x {len(combined_columns)} columns\")\n \n"
}
] |
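The behavioural change in point 3 is easiest to see in isolation: every origin now gets an explicit "yes"/"no" column computed from the set of strains its input file contained (strings rather than booleans because booleans are awkward for `augur filter`, as the code comments note). A toy reproduction of just that step, with made-up inputs:

```python
metadata = [
    {"origin": "gisaid",  "strains": {"strainA", "strainB"}},
    {"origin": "genbank", "strains": {"strainB", "strainC"}},
]
combined_data = {"strainA": {}, "strainB": {}, "strainC": {}}

# One "yes"/"no" column per origin, regardless of which file a strain came from first.
for entry in metadata:
    for strain in combined_data:
        combined_data[strain][entry["origin"]] = (
            "yes" if strain in entry["strains"] else "no"
        )

print(combined_data["strainB"])   # {'gisaid': 'yes', 'genbank': 'yes'}
```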
b1e5407c503fc067da452d6d65f503d018849252 | nextstrain/ncov | 20.09.2021 14:25:04 | MIT License | Refactor sanitizer transform logic
Move logic to parse mapping of old to new column names and strip
prefixes into their own functions with tests. This refactoring
simplifies the code in the main body of the sanitizer script. | [
{
"change_type": "MODIFY",
"old_path": "scripts/sanitize_metadata.py",
"new_path": "scripts/sanitize_metadata.py",
"diff": "@@ -16,6 +16,39 @@ LOCATION_FIELDS = (\n )\n \n \n+def parse_new_column_names(renaming_rules):\n+ \"\"\"Parse the mapping of current to new column names from the given list of renaming rules.\n+\n+ Parameters\n+ ----------\n+ renaming_rules : list[str]\n+ A list of strings mapping an old column name to a new one delimited by an equal symbol (e.g., \"old_column=new_column\").\n+\n+ Returns\n+ -------\n+ dict :\n+ A mapping of new column names for each old column name.\n+\n+ >>> parse_new_column_names([\"old=new\", \"new=old\"])\n+ {'old': 'new', 'new': 'old'}\n+ >>> parse_new_column_names([\"old->new\"])\n+ {}\n+\n+ \"\"\"\n+ new_column_names = {}\n+ for rule in renaming_rules:\n+ if \"=\" in rule:\n+ old_column, new_column = rule.split(\"=\")\n+ new_column_names[old_column] = new_column\n+ else:\n+ print(\n+ f\"WARNING: missing mapping of old to new column in form of 'Virus name=strain' for rule: '{rule}'.\",\n+ file=sys.stderr\n+ )\n+\n+ return new_column_names\n+\n+\n def parse_location_string(location_string, location_fields):\n \"\"\"Parse location string from GISAID into the given separate geographic scales\n and return a dictionary of parse values by scale.\n@@ -69,6 +102,35 @@ def parse_location_string(location_string, location_fields):\n return locations\n \n \n+def strip_prefixes(strain_name, prefixes):\n+ \"\"\"Strip the given prefixes from the given strain name.\n+\n+ Parameters\n+ ----------\n+ strain_name : str\n+ Name of a strain to be sanitized\n+ prefixes : list[str]\n+ A list of prefixes to be stripped from the strain name.\n+\n+ Returns\n+ -------\n+ str :\n+ Strain name without any of the given prefixes.\n+\n+\n+ >>> strip_prefixes(\"hCoV-19/RandomStrain/1/2020\", [\"hCoV-19/\", \"SARS-CoV-2/\"])\n+ 'RandomStrain/1/2020'\n+ >>> strip_prefixes(\"SARS-CoV-2/RandomStrain/2/2020\", [\"hCoV-19/\", \"SARS-CoV-2/\"])\n+ 'RandomStrain/2/2020'\n+ >>> strip_prefixes(\"hCoV-19/RandomStrain/1/2020\", [\"SARS-CoV-2/\"])\n+ 'hCoV-19/RandomStrain/1/2020'\n+\n+ \"\"\"\n+ joined_prefixes = \"|\".join(prefixes)\n+ pattern = f\"^({joined_prefixes})\"\n+ return re.sub(pattern, \"\", strain_name)\n+\n+\n def resolve_duplicates(metadata, strain_field, database_id_columns, error_on_duplicates=False):\n \"\"\"Resolve duplicate records for a given strain field and return a deduplicated\n data frame. This approach chooses the record with the most recent database\n@@ -214,18 +276,8 @@ if __name__ == '__main__':\n axis=1\n ).drop(columns=[args.parse_location_field])\n \n- new_column_names = {}\n- if args.rename_fields:\n- # Rename specific columns using rules like \"Virus name=strain\".\n- for rule in args.rename_fields:\n- if \"=\" in rule:\n- old_column, new_column = rule.split(\"=\")\n- new_column_names[old_column] = new_column\n- else:\n- print(\n- f\"WARNING: missing mapping of old to new column in form of 'Virus name=strain' for rule: '{rule}'.\",\n- file=sys.stderr\n- )\n+ # Parse mapping of old column names to new.\n+ new_column_names = parse_new_column_names(args.rename_fields)\n \n # Rename columns as needed.\n if len(new_column_names) > 0:\n@@ -247,15 +299,8 @@ if __name__ == '__main__':\n sys.exit(1)\n \n if args.strip_prefixes:\n- prefixes = \"|\".join(args.strip_prefixes)\n- pattern = f\"^({prefixes})\"\n-\n metadata[strain_field] = metadata[strain_field].apply(\n- lambda strain: re.sub(\n- pattern,\n- \"\",\n- strain\n- )\n+ lambda strain: strip_prefixes(strain, args.strip_prefixes)\n )\n \n # Replace whitespaces from strain names with underscores to match GISAID's\n"
}
] |
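Both extracted helpers are pure functions, which is what makes the new doctests in the diff possible. A small usage sketch reusing the doctest inputs; it assumes `scripts/sanitize_metadata.py` is importable under that name.

```python
from sanitize_metadata import parse_new_column_names, strip_prefixes

renames = parse_new_column_names(["Virus name=strain", "Location=location"])
# -> {'Virus name': 'strain', 'Location': 'location'}; malformed rules only warn.

name = strip_prefixes("hCoV-19/RandomStrain/1/2020", ["hCoV-19/", "SARS-CoV-2/"])
# -> 'RandomStrain/1/2020'
```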
dbffb035f72de8d43e5f04fc1ea0ce1b0da21e7a | teemtee/tmt | 05.03.2020 14:21:24 | MIT License | Support selecting objects under the current folder
Add a special single dot notation for filtering stories, plans and
tests under the current working directory. Update documentation
and usage messages accordingly. | [
{
"change_type": "MODIFY",
"old_path": "docs/examples.rst",
"new_path": "docs/examples.rst",
"diff": "@@ -152,6 +152,12 @@ condition::\n $ tmt tests ls --condition 'tier > 0'\n /tests/ls\n \n+In order to select tests under the current working directory use\n+the single dot notation::\n+\n+ $ tmt test show .\n+ $ tmt run test --name .\n+\n \n Lint Tests\n ------------------------------------------------------------------\n@@ -345,6 +351,7 @@ inheritance and merging attributes.\n \n .. _fmf features: https://fmf.readthedocs.io/en/latest/features.html\n \n+\n Stories\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n@@ -420,6 +427,11 @@ available for binary status filtering::\n documented /tmt/cli\n ...\n \n+In order to select stories under the current working directory use\n+the single dot notation::\n+\n+ $ tmt story show .\n+\n \n Story Coverage\n ------------------------------------------------------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "stories/cli/plan.fmf",
"new_path": "stories/cli/plan.fmf",
"diff": "@@ -15,7 +15,11 @@ story: 'As a user I want to comfortably work with plans'\n \n /filter:\n story: 'Filter available plans'\n+ description:\n+ Search plans using a regular expression or a filter.\n+ Use ``.`` to select plans under the current directory.\n example:\n+ - tmt plan ls .\n - tmt plan ls REGEXP\n - tmt plan show --filter artifact:build\n implemented: /tmt/base.py\n"
},
{
"change_type": "MODIFY",
"old_path": "stories/cli/story.fmf",
"new_path": "stories/cli/story.fmf",
"diff": "@@ -15,13 +15,19 @@ story: 'As a developer I want to comfortably work with stories'\n \n /filter:\n story: 'Search available stories'\n+ description:\n+ Search stories using a regular expression, a filter or\n+ coverage status. Use ``.`` to select stories under the\n+ current directory.\n example:\n- - tmt story --implemented\n- - tmt story --unimplemented\n- - tmt story --tested\n- - tmt story --untested\n- - tmt story --documented\n- - tmt story --undocumented\n+ - tmt story ls .\n+ - tmt story ls REGEXP\n+ - tmt story ls --implemented\n+ - tmt story ls --unimplemented\n+ - tmt story ls --tested\n+ - tmt story ls --untested\n+ - tmt story ls --documented\n+ - tmt story ls --undocumented\n implemented: /tmt/cli\n documented: /docs/examples#filter-stories\n \n"
},
{
"change_type": "MODIFY",
"old_path": "stories/cli/test.fmf",
"new_path": "stories/cli/test.fmf",
"diff": "@@ -107,7 +107,11 @@ story: 'As a user I want to comfortably work with tests'\n \n /filter:\n story: 'Filter available tests'\n+ description:\n+ Search tests using a regular expression or a filter.\n+ Use ``.`` to select tests under the current directory.\n example:\n+ - tmt test ls .\n - tmt test ls REGEXP\n - tmt test show --filter tier:1\n - tmt test show --condition 'tier < 2'\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/base.py",
"new_path": "tmt/base.py",
"diff": "@@ -48,6 +48,25 @@ class Node(tmt.utils.Common):\n echo(tmt.utils.format(\n 'sources', self.node.sources, key_color='magenta'))\n \n+ @classmethod\n+ def _save_context(cls, context):\n+ \"\"\" Save provided command line context for future use \"\"\"\n+ super(Node, cls)._save_context(context)\n+\n+ # Handle '.' as an alias for the current working directory\n+ names = cls._opt('names')\n+ if names is not None and '.' in names:\n+ root = context.obj.tree.root\n+ current = os.getcwd()\n+ # Handle special case when directly in the metadata root\n+ if current == root:\n+ path = '/'\n+ # Prepare path from the tree root to the current directory\n+ else:\n+ path = os.path.join('/', os.path.relpath(current, root))\n+ cls._context.params['names'] = (\n+ path if name == '.' else name for name in names)\n+\n def name_and_summary(self):\n \"\"\" Node name and optional summary \"\"\"\n if self.summary:\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/cli.py",
"new_path": "tmt/cli.py",
"diff": "@@ -152,7 +152,7 @@ def main(context, root, **kwargs):\n \"\"\" Test Management Tool \"\"\"\n # Initialize metadata tree\n tree = tmt.Tree(root)\n- tree._context = context\n+ tree._save_context(context)\n context.obj = tmt.utils.Common()\n context.obj.tree = tree\n # List of enabled steps\n@@ -164,6 +164,7 @@ def main(context, root, **kwargs):\n tmt.Plan.overview(tree)\n tmt.Story.overview(tree)\n \n+\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n # Run\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n@@ -179,8 +180,8 @@ def main(context, root, **kwargs):\n def run(context, all_, id_, **kwargs):\n \"\"\" Run test steps. \"\"\"\n # Initialize\n+ tmt.Run._save_context(context)\n run = tmt.Run(id_, context.obj.tree)\n- run._context = context\n context.obj.run = run\n \n main.add_command(run)\n@@ -196,7 +197,7 @@ main.add_command(run)\n def discover(context, **kwargs):\n \"\"\" Gather and show information about test cases to be executed. \"\"\"\n context.obj.steps.add('discover')\n- tmt.steps.discover.Discover._context = context\n+ tmt.steps.discover.Discover._save_context(context)\n return 'discover'\n \n \n@@ -238,7 +239,7 @@ def discover(context, **kwargs):\n def provision(context, **kwargs):\n \"\"\" Provision an environment for testing (or use localhost). \"\"\"\n context.obj.steps.add('provision')\n- tmt.steps.provision.Provision._context = context\n+ tmt.steps.provision.Provision._save_context(context)\n \n \n @run.command()\n@@ -257,7 +258,7 @@ def provision(context, **kwargs):\n def prepare(context, **kwargs):\n \"\"\" Configure environment for testing (like ansible playbook). \"\"\"\n context.obj.steps.add('prepare')\n- tmt.steps.prepare.Prepare._context = context\n+ tmt.steps.prepare.Prepare._save_context(context)\n \n \n @run.command()\n@@ -273,7 +274,7 @@ def prepare(context, **kwargs):\n def execute(context, **kwargs):\n \"\"\" Run the tests (using the specified framework and its settings). \"\"\"\n context.obj.steps.add('execute')\n- tmt.steps.execute.Execute._context = context\n+ tmt.steps.execute.Execute._save_context(context)\n \n \n @run.command()\n@@ -286,7 +287,7 @@ def execute(context, **kwargs):\n def report(context, **kwargs):\n \"\"\" Provide an overview of test results and send notifications. \"\"\"\n context.obj.steps.add('report')\n- tmt.steps.report.Report._context = context\n+ tmt.steps.report.Report._save_context(context)\n \n \n @run.command()\n@@ -299,7 +300,7 @@ def report(context, **kwargs):\n def finish(context, **kwargs):\n \"\"\" Additional actions to be performed after the test execution. \"\"\"\n context.obj.steps.add('finish')\n- tmt.steps.finish.Finish._context = context\n+ tmt.steps.finish.Finish._save_context(context)\n \n \n @run.command()\n@@ -315,8 +316,13 @@ def finish(context, **kwargs):\n help=\"Use arbitrary Python expression for filtering.\")\n @verbose_debug_quiet\n def plans(context, **kwargs):\n- \"\"\" Select plans which should be executed. \"\"\"\n- tmt.base.Plan._context = context\n+ \"\"\"\n+ Select plans which should be executed\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.base.Plan._save_context(context)\n \n \n @run.command()\n@@ -332,8 +338,13 @@ def plans(context, **kwargs):\n help=\"Use arbitrary Python expression for filtering.\")\n @verbose_debug_quiet\n def tests(context, **kwargs):\n- \"\"\" Select tests which should be executed. 
\"\"\"\n- tmt.base.Test._context = context\n+ \"\"\"\n+ Select tests which should be executed\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.base.Test._save_context(context)\n \n \n @run.resultcallback()\n@@ -370,8 +381,13 @@ main.add_command(tests)\n @name_filter_condition\n @verbose_debug_quiet\n def ls(context, **kwargs):\n- \"\"\" List available tests. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ List available tests\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n test.ls()\n \n@@ -381,8 +397,13 @@ def ls(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def show(context, **kwargs):\n- \"\"\" Show test details. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ Show test details\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n test.show()\n echo()\n@@ -393,8 +414,13 @@ def show(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def lint(context, **kwargs):\n- \"\"\" Check tests against the L1 metadata specification. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ Check tests against the L1 metadata specification\n+\n+ Regular expression can be used to filter tests for linting.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n test.lint()\n echo()\n@@ -412,7 +438,7 @@ _test_templates = listed(tmt.templates.TEST, join='or')\n @force_dry\n def create(context, name, template, force, **kwargs):\n \"\"\" Create a new test based on given template. \"\"\"\n- tmt.Test._context = context\n+ tmt.Test._save_context(context)\n tmt.Test.create(name, template, context.obj.tree, force)\n \n \n@@ -444,7 +470,7 @@ def import_(context, paths, makefile, nitrate, purpose, **kwargs):\n nitrate ...... contact, component, tag,\n environment, relevancy, enabled\n \"\"\"\n- tmt.Test._context = context\n+ tmt.Test._save_context(context)\n if not paths:\n paths = ['.']\n for path in paths:\n@@ -479,8 +505,13 @@ def import_(context, paths, makefile, nitrate, purpose, **kwargs):\n '-d', '--debug', is_flag=True,\n help='Provide as much debugging details as possible.')\n def export(context, format_, **kwargs):\n- \"\"\" Export test data into the desired format. \"\"\"\n- tmt.Test._context = context\n+ \"\"\"\n+ Export test data into the desired format\n+\n+ Regular expression can be used to filter tests by name.\n+ Use '.' to select tests under the current working directory.\n+ \"\"\"\n+ tmt.Test._save_context(context)\n for test in context.obj.tree.tests():\n echo(test.export(format_=format_))\n \n@@ -500,7 +531,7 @@ def plans(context, **kwargs):\n Search for available plans.\n Explore detailed test step configuration.\n \"\"\"\n- tmt.Plan._context = context\n+ tmt.Plan._save_context(context)\n \n # Show overview of available plans\n if context.invoked_subcommand is None:\n@@ -514,8 +545,13 @@ main.add_command(plans)\n @name_filter_condition\n @verbose_debug_quiet\n def ls(context, **kwargs):\n- \"\"\" List available plans. 
\"\"\"\n- tmt.Plan._context = context\n+ \"\"\"\n+ List available plans\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.Plan._save_context(context)\n for plan in context.obj.tree.plans():\n plan.ls()\n \n@@ -525,8 +561,13 @@ def ls(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def show(context, **kwargs):\n- \"\"\" Show plan details. \"\"\"\n- tmt.Plan._context = context\n+ \"\"\"\n+ Show plan details\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.Plan._save_context(context)\n for plan in context.obj.tree.plans():\n plan.show()\n echo()\n@@ -537,8 +578,13 @@ def show(context, **kwargs):\n @name_filter_condition\n @verbose_debug_quiet\n def lint(context, **kwargs):\n- \"\"\" Check plans against the L2 metadata specification. \"\"\"\n- tmt.Plan._context = context\n+ \"\"\"\n+ Check plans against the L2 metadata specification\n+\n+ Regular expression can be used to filter plans by name.\n+ Use '.' to select plans under the current working directory.\n+ \"\"\"\n+ tmt.Plan._save_context(context)\n for plan in context.obj.tree.plans():\n plan.lint()\n echo()\n@@ -556,7 +602,7 @@ _plan_templates = listed(tmt.templates.PLAN, join='or')\n @force_dry\n def create(context, name, template, force, **kwargs):\n \"\"\" Create a new plan based on given template. \"\"\"\n- tmt.Plan._context = context\n+ tmt.Plan._save_context(context)\n tmt.Plan.create(name, template, context.obj.tree, force)\n \n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n@@ -574,7 +620,7 @@ def stories(context, **kwargs):\n Check available user stories.\n Explore coverage (test, implementation, documentation).\n \"\"\"\n- tmt.Story._context = context\n+ tmt.Story._save_context(context)\n \n # Show overview of available stories\n if context.invoked_subcommand is None:\n@@ -591,8 +637,13 @@ main.add_command(stories)\n def ls(\n context, implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" List available stories. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ List available stories\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n for story in context.obj.tree.stories():\n if story._match(implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered):\n@@ -607,8 +658,13 @@ def ls(\n def show(\n context, implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" Show story details. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ Show story details\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n for story in context.obj.tree.stories():\n if story._match(implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered):\n@@ -628,7 +684,7 @@ _story_templates = listed(tmt.templates.STORY, join='or')\n @force_dry\n def create(context, name, template, force, **kwargs):\n \"\"\" Create a new story based on given template. 
\"\"\"\n- tmt.Story._context = context\n+ tmt.Story._save_context(context)\n tmt.base.Story.create(name, template, context.obj.tree, force)\n \n \n@@ -647,8 +703,13 @@ def coverage(\n context, code, test, docs,\n implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" Show code, test and docs coverage for given stories. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ Show code, test and docs coverage for given stories\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n \n def headfoot(text):\n \"\"\" Format simple header/footer \"\"\"\n@@ -703,8 +764,13 @@ def export(\n context, format_,\n implemented, tested, documented, covered,\n unimplemented, untested, undocumented, uncovered, **kwargs):\n- \"\"\" Export selected stories into desired format. \"\"\"\n- tmt.Story._context = context\n+ \"\"\"\n+ Export selected stories into desired format\n+\n+ Regular expression can be used to filter stories by name.\n+ Use '.' to select stories under the current working directory.\n+ \"\"\"\n+ tmt.Story._save_context(context)\n \n for story in context.obj.tree.stories(whole=True):\n if story._match(implemented, tested, documented, covered,\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/utils.py",
"new_path": "tmt/utils.py",
"diff": "@@ -54,6 +54,11 @@ class Common(object):\n \"\"\" Name is the default string representation \"\"\"\n return self.name\n \n+ @classmethod\n+ def _save_context(cls, context):\n+ \"\"\" Save provided command line context for future use \"\"\"\n+ cls._context = context\n+\n @classmethod\n def _opt(cls, option, default=None):\n \"\"\" Get an option from the command line context (class version) \"\"\"\n"
}
] |
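To illustrate how the single dot alias from the commit above gets resolved, here is a minimal, self-contained sketch of the idea; the function name `resolve_dot_alias` and its arguments are illustrative stand-ins, not the actual tmt API.

```python
import os


def resolve_dot_alias(names, tree_root, cwd=None):
    """Replace the special '.' name with a path relative to the tree root."""
    cwd = cwd or os.getcwd()
    if cwd == tree_root:
        # Directly in the metadata root: select everything
        path = '/'
    else:
        # Path from the tree root to the current directory
        path = os.path.join('/', os.path.relpath(cwd, tree_root))
    return [path if name == '.' else name for name in names]


# Running from <root>/tests/core selects only objects under that directory
print(resolve_dot_alias(['.'], '/home/user/project', '/home/user/project/tests/core'))
# ['/tests/core']
```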
e2acc7e1b8d14cc3c1f48d9a6609c99f2043c756 | teemtee/tmt | 11.05.2020 15:33:13 | MIT License | Support short Fedora compose aliases in testcloud
Adds support for 'fedora' as the latest released Fedora and 'rawhide'
for the latest Rawhide compose, plus the 'fedora-X', 'f-X' and 'fX' shortcuts. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -120,9 +120,6 @@ DEFAULT_CONNECT_TIMEOUT = 10 # seconds\n \n # Image guessing related variables\n KOJI_URL = 'https://kojipkgs.fedoraproject.org/compose'\n-RAWHIDE_URL = f'{KOJI_URL}/rawhide/latest-Fedora-Rawhide'\n-RAWHIDE_ID = f'{RAWHIDE_URL}/COMPOSE_ID'\n-RAWHIDE_IMAGE_URL = f'{RAWHIDE_URL}/compose/Cloud/x86_64/images'\n \n \n class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n@@ -142,8 +139,10 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n user: root\n memory: 2048\n \n- For the image use 'fedora' for the latest rawhide compose or full\n- url to the qcow2 image for example from:\n+ As the image use 'fedora' for the latest released Fedora compose,\n+ 'rawhide' for the latest Rawhide compose, short aliases such as\n+ 'fedora-32', 'f-32' or 'f32' for specific release or a full url to\n+ the qcow2 image for example from:\n \n https://kojipkgs.fedoraproject.org/compose/\n \n@@ -249,28 +248,47 @@ class GuestTestcloud(tmt.Guest):\n def _guess_image_url(name):\n \"\"\" Guess image url for given name \"\"\"\n \n- def get_compose_id(compose_id_url):\n- response = requests.get(f'{compose_id_url}')\n-\n- if not response:\n+ def latest_release():\n+ \"\"\" Get the latest released Fedora number \"\"\"\n+ try:\n+ response = requests.get(KOJI_URL)\n+ releases = re.findall(r'>(\\d\\d)/<', response.text)\n+ return releases[-1]\n+ except requests.RequestException as error:\n raise ProvisionError(\n- f'Failed to find compose ID for '\n- f\"'{name}' at '{compose_id_url}'\")\n-\n- return response.text\n-\n- # Map fedora, rawhide or fedora-rawhide to latest rawhide image\n- if re.match(r'^(fedora|fedora-rawhide|rawhide)$', name, re.IGNORECASE):\n- compose_id = get_compose_id(RAWHIDE_ID)\n- compose_name = compose_id.replace(\n- 'Fedora-Rawhide', 'Fedora-Cloud-Base-Rawhide')\n- return f'{RAWHIDE_IMAGE_URL}/{compose_name}.x86_64.qcow2'\n+ f\"Unable to check Fedora composes ({error}).\")\n+ except IndexError:\n+ raise ProvisionError(\n+ f\"Latest Fedora release not found at '{KOJI_URL}'.\")\n \n # Try to check if given url is a local file\n if os.path.exists(name):\n return f'file://{name}'\n \n- raise ProvisionError(f\"Could not map '{name}' to compose.\")\n+ # Map fedora aliases (e.g. rawhide, fedora, fedora-32, f-32, f32)\n+ name = name.lower().strip()\n+ matched = re.match(r'^f(edora)?-?(\\d+)$', name)\n+ if matched:\n+ release = matched.group(2)\n+ elif 'rawhide' in name:\n+ release = 'rawhide'\n+ elif name == 'fedora':\n+ release = latest_release()\n+ else:\n+ raise ProvisionError(f\"Could not map '{name}' to compose.\")\n+\n+ # Prepare the full qcow name\n+ images = f\"{KOJI_URL}/{release}/latest-Fedora-{release.capitalize()}\"\n+ images += \"/compose/Cloud/x86_64/images\"\n+ response = requests.get(images)\n+ matched = re.search(\">(Fedora-Cloud[^<]*qcow2)<\", response.text)\n+ try:\n+ compose_name = matched.group(1)\n+ except AttributeError:\n+ raise ProvisionError(\n+ f\"Failed to detect full compose name from '{images}'.\")\n+ return f'{images}/{compose_name}'\n+\n \n @staticmethod\n def _create_template():\n"
}
] |
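The alias handling in the diff above mixes name parsing with network lookups against the Koji compose server. Below is a reduced sketch of just the parsing step; the latest release number is passed in as a plain argument instead of being detected remotely, and the function name and default value are illustrative assumptions.

```python
import re


def map_fedora_alias(name, latest_release='36'):
    """Map short Fedora aliases to a release string.

    'fedora' maps to the latest released Fedora (supplied here as an
    argument), 'rawhide' stays 'rawhide', and 'fedora-X', 'f-X' or 'fX'
    map to 'X'.
    """
    name = name.lower().strip()
    matched = re.match(r'^f(edora)?-?(\d+)$', name)
    if matched:
        return matched.group(2)
    if 'rawhide' in name:
        return 'rawhide'
    if name == 'fedora':
        return latest_release
    raise ValueError(f"Could not map '{name}' to a Fedora release.")


for alias in ('fedora', 'rawhide', 'fedora-32', 'f-32', 'f32'):
    print(alias, '->', map_fedora_alias(alias))
```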
8ec33c15a1586b5514b049600e960764a64a11dc | teemtee/tmt | 13.07.2020 16:58:01 | MIT License | Move the Guest class from base to steps.provision
Here it makes more sense as it is related to provision classes. | [
{
"change_type": "MODIFY",
"old_path": "tmt/__init__.py",
"new_path": "tmt/__init__.py",
"diff": "@@ -1,5 +1,6 @@\n \"\"\" Test Management Tool \"\"\"\n \n-from tmt.base import Tree, Test, Plan, Story, Run, Guest, Result\n+from tmt.base import Tree, Test, Plan, Story, Run, Result\n+from tmt.steps.provision import Guest\n \n __all__ = ['Tree', 'Test', 'Plan', 'Story', 'Run', 'Guest', 'Result']\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/base.py",
"new_path": "tmt/base.py",
"diff": "@@ -7,8 +7,6 @@ import fmf\n import yaml\n import click\n import pprint\n-import random\n-import string\n import subprocess\n \n import tmt.steps\n@@ -910,197 +908,6 @@ class Run(tmt.utils.Common):\n self.finish()\n \n \n-class Guest(tmt.utils.Common):\n- \"\"\"\n- Guest environment provisioned for test execution\n-\n- The following keys are expected in the 'data' dictionary::\n-\n- guest ...... hostname or ip address\n- user ....... user name to log in\n- key ........ private key\n- password ... password\n-\n- These are by default imported into instance attributes.\n- \"\"\"\n-\n- # Supported keys (used for import/export to/from attributes)\n- _keys = ['guest', 'user', 'key', 'password']\n-\n- def __init__(self, data, name=None, parent=None):\n- \"\"\" Initialize guest data \"\"\"\n- super().__init__(parent, name)\n- self.load(data)\n-\n- def _random_name(self):\n- \"\"\" Generate a random name \"\"\"\n- return ''.join(random.choices(string.ascii_letters, k=16))\n-\n- def _ssh_guest(self):\n- \"\"\" Return user@guest \"\"\"\n- return f'{self.user}@{self.guest}'\n-\n- def _ssh_options(self, join=False):\n- \"\"\" Return common ssh options (list or joined) \"\"\"\n- options = [\n- '-oStrictHostKeyChecking=no',\n- '-oUserKnownHostsFile=/dev/null',\n- ]\n- if self.key:\n- options.extend(['-i', self.key])\n- return ' '.join(options) if join else options\n-\n- def _ssh_command(self, join=False):\n- \"\"\" Prepare an ssh command line for execution (list or joined) \"\"\"\n- command = ['sshpass', f'-p{self.password}'] if self.password else []\n- command += ['ssh'] + self._ssh_options()\n- return ' '.join(command) if join else command\n-\n- def load(self, data):\n- \"\"\" Load guest data for easy access \"\"\"\n- for key in self._keys:\n- setattr(self, key, data.get(key))\n-\n- def save(self):\n- \"\"\" Save guest data for future wake up \"\"\"\n- data = dict()\n- for key in self._keys:\n- value = getattr(self, key)\n- if value is not None:\n- data[key] = value\n- return data\n-\n- def wake(self):\n- \"\"\" Wake up the guest \"\"\"\n- self.debug(f\"Doing nothing to wake up guest '{self.guest}'.\")\n-\n- def start(self):\n- \"\"\" Start the guest \"\"\"\n- self.debug(f\"Doing nothing to start guest '{self.guest}'.\")\n-\n- def details(self):\n- \"\"\" Show guest details such as distro and kernel \"\"\"\n- # Skip distro & kernel check in dry mode\n- if self.opt('dry'):\n- return\n- # Distro\n- try:\n- distro = self.execute(\n- 'cat /etc/redhat-release', dry=True)[0].strip()\n- except tmt.utils.RunError:\n- try:\n- distro = self.execute('cat /etc/lsb-release')[0].strip()\n- distro = re.search('DESCRIPTION=\"(.*)\"', distro).group(1)\n- except (tmt.utils.RunError, AttributeError):\n- distro = None\n- if distro:\n- self.info('distro', distro, 'green')\n- # Kernel\n- kernel = self.execute('uname -r', dry=True)[0].strip()\n- self.verbose('kernel', kernel, 'green')\n-\n- def _ansible_verbosity(self):\n- \"\"\" Prepare verbose level based on the --debug option count \"\"\"\n- if self.opt('debug') < 3:\n- return ''\n- else:\n- return ' -' + (self.opt('debug') - 2) * 'v'\n-\n- def _ansible_summary(self, output):\n- \"\"\" Check the output for ansible result summary numbers \"\"\"\n- if not output:\n- return\n- keys = 'ok changed unreachable failed skipped rescued ignored'.split()\n- for key in keys:\n- matched = re.search(rf'^.*\\s:\\s.*{key}=(\\d+).*$', output, re.M)\n- if matched and int(matched.group(1)) > 0:\n- tasks = fmf.utils.listed(matched.group(1), 'task')\n- self.verbose(key, tasks, 
'green')\n-\n- def _ansible_playbook_path(self, playbook):\n- \"\"\" Prepare full ansible playbook path \"\"\"\n- # Playbook paths should be relative to the metadata tree root\n- self.debug(f\"Applying playbook '{playbook}' on guest '{self.guest}'.\")\n- playbook = os.path.join(self.parent.plan.run.tree.root, playbook)\n- self.debug(f\"Playbook full path: '{playbook}'\", level=2)\n- return playbook\n-\n- def _export_environment(self, execute_environment=None):\n- \"\"\" Prepare shell export of environment variables \"\"\"\n- # Prepare environment variables so they can be correctly passed\n- # to ssh's shell. Create a copy to prevent modifying source.\n- environment = dict()\n- environment.update(execute_environment or dict())\n- # Plan environment and variables provided on the command line\n- # override environment provided to execute().\n- environment.update(self.parent.plan.environment)\n- # Prepend with export and run as a separate command.\n- if not environment:\n- return ''\n- return 'export {}; '.format(\n- ' '.join(tmt.utils.shell_variables(environment)))\n-\n- def ansible(self, playbook):\n- \"\"\" Prepare guest using ansible playbook \"\"\"\n- playbook = self._ansible_playbook_path(playbook)\n- stdout, stderr = self.run(\n- f'stty cols {tmt.utils.OUTPUT_WIDTH}; ansible-playbook '\n- f'--ssh-common-args=\"{self._ssh_options(join=True)}\" '\n- f'-e ansible_python_interpreter=auto'\n- f'{self._ansible_verbosity()} -i {self._ssh_guest()}, {playbook}')\n- self._ansible_summary(stdout)\n-\n- def execute(self, command, **kwargs):\n- \"\"\"\n- Execute command on the guest\n-\n- command ....... string or list of command arguments\n- environment ... dictionary with environment variables\n- \"\"\"\n-\n- # Prepare the export of environment variables\n- environment = self._export_environment(kwargs.get('env', dict()))\n-\n- # Change to given directory on guest if cwd provided\n- directory = kwargs.get('cwd', '')\n- if directory:\n- directory = f\"cd '{directory}'; \"\n-\n- # Run in interactive mode if requested\n- interactive = ['-t'] if kwargs.get('interactive') else []\n-\n- # Prepare command and run it\n- if isinstance(command, (list, tuple)):\n- command = ' '.join(command)\n- self.debug(f\"Execute command '{command}' on guest '{self.guest}'.\")\n- command = (\n- self._ssh_command() + interactive + [self._ssh_guest()] +\n- [f'{environment}{directory}{command}'])\n- return self.run(command, shell=False, **kwargs)\n-\n- def push(self):\n- \"\"\" Push workdir to guest \"\"\"\n- self.debug(f\"Push workdir to guest '{self.guest}'.\")\n- self.run(\n- f'rsync -Rrze \"{self._ssh_command(join=True)}\" --delete '\n- f'{self.parent.plan.workdir} {self._ssh_guest()}:/')\n-\n- def pull(self):\n- \"\"\" Pull workdir from guest \"\"\"\n- self.debug(f\"Pull workdir from guest '{self.guest}'.\")\n- self.run(\n- f'rsync -Rrze \"{self._ssh_command(join=True)}\" '\n- f'{self._ssh_guest()}:{self.parent.plan.workdir} /')\n-\n- def stop(self):\n- \"\"\" Stop the guest \"\"\"\n- self.debug(f\"Doing nothing to stop guest '{self.guest}'.\")\n-\n- def remove(self):\n- \"\"\" Remove the guest (disk cleanup) \"\"\"\n- self.debug(f\"Doing nothing to remove guest '{self.guest}'.\")\n-\n-\n class Result(object):\n \"\"\"\n Test result\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/__init__.py",
"new_path": "tmt/steps/provision/__init__.py",
"diff": "@@ -1,9 +1,13 @@\n import os\n+import re\n import click\n+import random\n+import string\n \n import fmf\n import tmt\n \n+\n class Provision(tmt.steps.Step):\n \"\"\" Provision an environment for testing or use localhost \"\"\"\n \n@@ -146,3 +150,194 @@ class ProvisionPlugin(tmt.steps.Plugin):\n Should return a provisioned Guest() instance.\n \"\"\"\n raise NotImplementedError\n+\n+\n+class Guest(tmt.utils.Common):\n+ \"\"\"\n+ Guest environment provisioned for test execution\n+\n+ The following keys are expected in the 'data' dictionary::\n+\n+ guest ...... hostname or ip address\n+ user ....... user name to log in\n+ key ........ private key\n+ password ... password\n+\n+ These are by default imported into instance attributes.\n+ \"\"\"\n+\n+ # Supported keys (used for import/export to/from attributes)\n+ _keys = ['guest', 'user', 'key', 'password']\n+\n+ def __init__(self, data, name=None, parent=None):\n+ \"\"\" Initialize guest data \"\"\"\n+ super().__init__(parent, name)\n+ self.load(data)\n+\n+ def _random_name(self):\n+ \"\"\" Generate a random name \"\"\"\n+ return ''.join(random.choices(string.ascii_letters, k=16))\n+\n+ def _ssh_guest(self):\n+ \"\"\" Return user@guest \"\"\"\n+ return f'{self.user}@{self.guest}'\n+\n+ def _ssh_options(self, join=False):\n+ \"\"\" Return common ssh options (list or joined) \"\"\"\n+ options = [\n+ '-oStrictHostKeyChecking=no',\n+ '-oUserKnownHostsFile=/dev/null',\n+ ]\n+ if self.key:\n+ options.extend(['-i', self.key])\n+ return ' '.join(options) if join else options\n+\n+ def _ssh_command(self, join=False):\n+ \"\"\" Prepare an ssh command line for execution (list or joined) \"\"\"\n+ command = ['sshpass', f'-p{self.password}'] if self.password else []\n+ command += ['ssh'] + self._ssh_options()\n+ return ' '.join(command) if join else command\n+\n+ def load(self, data):\n+ \"\"\" Load guest data for easy access \"\"\"\n+ for key in self._keys:\n+ setattr(self, key, data.get(key))\n+\n+ def save(self):\n+ \"\"\" Save guest data for future wake up \"\"\"\n+ data = dict()\n+ for key in self._keys:\n+ value = getattr(self, key)\n+ if value is not None:\n+ data[key] = value\n+ return data\n+\n+ def wake(self):\n+ \"\"\" Wake up the guest \"\"\"\n+ self.debug(f\"Doing nothing to wake up guest '{self.guest}'.\")\n+\n+ def start(self):\n+ \"\"\" Start the guest \"\"\"\n+ self.debug(f\"Doing nothing to start guest '{self.guest}'.\")\n+\n+ def details(self):\n+ \"\"\" Show guest details such as distro and kernel \"\"\"\n+ # Skip distro & kernel check in dry mode\n+ if self.opt('dry'):\n+ return\n+ # Distro\n+ try:\n+ distro = self.execute(\n+ 'cat /etc/redhat-release', dry=True)[0].strip()\n+ except tmt.utils.RunError:\n+ try:\n+ distro = self.execute('cat /etc/lsb-release')[0].strip()\n+ distro = re.search('DESCRIPTION=\"(.*)\"', distro).group(1)\n+ except (tmt.utils.RunError, AttributeError):\n+ distro = None\n+ if distro:\n+ self.info('distro', distro, 'green')\n+ # Kernel\n+ kernel = self.execute('uname -r', dry=True)[0].strip()\n+ self.verbose('kernel', kernel, 'green')\n+\n+ def _ansible_verbosity(self):\n+ \"\"\" Prepare verbose level based on the --debug option count \"\"\"\n+ if self.opt('debug') < 3:\n+ return ''\n+ else:\n+ return ' -' + (self.opt('debug') - 2) * 'v'\n+\n+ def _ansible_summary(self, output):\n+ \"\"\" Check the output for ansible result summary numbers \"\"\"\n+ if not output:\n+ return\n+ keys = 'ok changed unreachable failed skipped rescued ignored'.split()\n+ for key in keys:\n+ matched = 
re.search(rf'^.*\\s:\\s.*{key}=(\\d+).*$', output, re.M)\n+ if matched and int(matched.group(1)) > 0:\n+ tasks = fmf.utils.listed(matched.group(1), 'task')\n+ self.verbose(key, tasks, 'green')\n+\n+ def _ansible_playbook_path(self, playbook):\n+ \"\"\" Prepare full ansible playbook path \"\"\"\n+ # Playbook paths should be relative to the metadata tree root\n+ self.debug(f\"Applying playbook '{playbook}' on guest '{self.guest}'.\")\n+ playbook = os.path.join(self.parent.plan.run.tree.root, playbook)\n+ self.debug(f\"Playbook full path: '{playbook}'\", level=2)\n+ return playbook\n+\n+ def _export_environment(self, execute_environment=None):\n+ \"\"\" Prepare shell export of environment variables \"\"\"\n+ # Prepare environment variables so they can be correctly passed\n+ # to ssh's shell. Create a copy to prevent modifying source.\n+ environment = dict()\n+ environment.update(execute_environment or dict())\n+ # Plan environment and variables provided on the command line\n+ # override environment provided to execute().\n+ environment.update(self.parent.plan.environment)\n+ # Prepend with export and run as a separate command.\n+ if not environment:\n+ return ''\n+ return 'export {}; '.format(\n+ ' '.join(tmt.utils.shell_variables(environment)))\n+\n+ def ansible(self, playbook):\n+ \"\"\" Prepare guest using ansible playbook \"\"\"\n+ playbook = self._ansible_playbook_path(playbook)\n+ stdout, stderr = self.run(\n+ f'stty cols {tmt.utils.OUTPUT_WIDTH}; ansible-playbook '\n+ f'--ssh-common-args=\"{self._ssh_options(join=True)}\" '\n+ f'-e ansible_python_interpreter=auto'\n+ f'{self._ansible_verbosity()} -i {self._ssh_guest()}, {playbook}')\n+ self._ansible_summary(stdout)\n+\n+ def execute(self, command, **kwargs):\n+ \"\"\"\n+ Execute command on the guest\n+\n+ command ....... string or list of command arguments\n+ environment ... dictionary with environment variables\n+ \"\"\"\n+\n+ # Prepare the export of environment variables\n+ environment = self._export_environment(kwargs.get('env', dict()))\n+\n+ # Change to given directory on guest if cwd provided\n+ directory = kwargs.get('cwd', '')\n+ if directory:\n+ directory = f\"cd '{directory}'; \"\n+\n+ # Run in interactive mode if requested\n+ interactive = ['-t'] if kwargs.get('interactive') else []\n+\n+ # Prepare command and run it\n+ if isinstance(command, (list, tuple)):\n+ command = ' '.join(command)\n+ self.debug(f\"Execute command '{command}' on guest '{self.guest}'.\")\n+ command = (\n+ self._ssh_command() + interactive + [self._ssh_guest()] +\n+ [f'{environment}{directory}{command}'])\n+ return self.run(command, shell=False, **kwargs)\n+\n+ def push(self):\n+ \"\"\" Push workdir to guest \"\"\"\n+ self.debug(f\"Push workdir to guest '{self.guest}'.\")\n+ self.run(\n+ f'rsync -Rrze \"{self._ssh_command(join=True)}\" --delete '\n+ f'{self.parent.plan.workdir} {self._ssh_guest()}:/')\n+\n+ def pull(self):\n+ \"\"\" Pull workdir from guest \"\"\"\n+ self.debug(f\"Pull workdir from guest '{self.guest}'.\")\n+ self.run(\n+ f'rsync -Rrze \"{self._ssh_command(join=True)}\" '\n+ f'{self._ssh_guest()}:{self.parent.plan.workdir} /')\n+\n+ def stop(self):\n+ \"\"\" Stop the guest \"\"\"\n+ self.debug(f\"Doing nothing to stop guest '{self.guest}'.\")\n+\n+ def remove(self):\n+ \"\"\" Remove the guest (disk cleanup) \"\"\"\n+ self.debug(f\"Doing nothing to remove guest '{self.guest}'.\")\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/connect.py",
"new_path": "tmt/steps/provision/connect.py",
"diff": "@@ -101,7 +101,7 @@ class ProvisionConnect(tmt.steps.provision.ProvisionPlugin):\n data['key'] = key\n \n # And finally create the guest\n- self._guest = tmt.base.Guest(data, name=self.name, parent=self.step)\n+ self._guest = tmt.Guest(data, name=self.name, parent=self.step)\n \n def guest(self):\n \"\"\" Return the provisioned guest \"\"\"\n"
}
] |
3791880e2acac8537f2b293bafa9e11b12d5d087 | teemtee/tmt | 21.09.2020 10:13:49 | MIT License | Adjust the minute provision debug messages wording
Just a couple of minor cosmetic adjustments to make the messages
consistent with the rest. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/minute.py",
"new_path": "tmt/steps/provision/minute.py",
"diff": "@@ -107,7 +107,7 @@ class ProvisionMinute(tmt.steps.provision.ProvisionPlugin):\n \n # Read API URL from 1minutetip script\n try:\n- self.debug(f\"Getting the API URL from {SCRIPT_PATH}\")\n+ self.debug(f\"Get the API URL from '{SCRIPT_PATH}'.\")\n script_content = self.read(SCRIPT_PATH)\n match = re.search(API_URL_RE, script_content)\n if not match:\n@@ -170,7 +170,7 @@ class GuestMinute(tmt.Guest):\n return data\n \n def _guess_net_id(self):\n- self.debug(\"Checking network IP availability\")\n+ self.debug(\"Check the network IP availability.\")\n _, networks = run_openstack(\n self.api_url, 'ip availability list -f json')\n networks = json.loads(networks)\n@@ -186,7 +186,7 @@ class GuestMinute(tmt.Guest):\n best = max(\n networks, key=lambda x: x.get('Total IPs') - x.get('Used IPs'))\n self.debug(\n- f'Using the following network:\\n{json.dumps(best, indent=2)}',\n+ f'Use the following network:\\n{json.dumps(best, indent=2)}',\n level=2, shift=0)\n return best['Network ID'], best['Network Name']\n \n@@ -200,7 +200,7 @@ class GuestMinute(tmt.Guest):\n if not network_id:\n return False\n \n- self.debug(f\"Trying to boot a new openstack machine\")\n+ self.debug(f\"Try to boot a new openstack machine.\")\n error, net_info = run_openstack(\n self.api_url,\n f'server create --wait '\n@@ -221,7 +221,7 @@ class GuestMinute(tmt.Guest):\n self.guest = match.group('ip')\n \n # Wait for ssh connection\n- self.debug(\"Waiting for an ssh connection to the machine\")\n+ self.debug(\"Wait for an ssh connection to the machine.\")\n for i in range(1, DEFAULT_CONNECT_TIMEOUT):\n try:\n self.execute('whoami')\n@@ -231,32 +231,32 @@ class GuestMinute(tmt.Guest):\n time.sleep(1)\n \n if i == DEFAULT_CONNECT_TIMEOUT:\n- self.debug(\"Failed to boot the machine, removing it\")\n+ self.debug(\"Failed to boot the machine, removing it.\")\n self.delete()\n return False\n return True\n \n def _setup_machine(self):\n- self.debug(\"Trying to get a prereserved minute machine\")\n+ self.debug(\"Try to get a prereserved minute machine.\")\n response = retry_session().get(\n f'{self.api_url}?image_name={self.mt_image}'\n f'&user={self.username}&osver=rhos10', verify=False)\n if not response.ok:\n return\n- self.debug(f\"Result of getting prereserved machine: {response.text}\")\n+ self.debug(f\"Prereserved machine result: {response.text}\")\n # No prereserved machine, boot a new one\n if 'prereserve' not in response.text:\n return self._boot_machine()\n # Rename the prereserved machine\n old_name, self.guest = response.text.split()\n- self.debug(f\"Renaming the prereserved machine \"\n- f\"from {old_name} to {self.instance_name}\")\n+ self.debug(\n+ f\"Rename the machine from '{old_name}' to '{self.instance_name}'.\")\n _, rename_out = run_openstack(\n self.api_url, f'server set --name {self.instance_name} {old_name}')\n if rename_out is None or 'ERROR' in rename_out:\n return False\n # Machine renamed, set properties\n- self.debug(\"Changing properties of the prereserved machine\")\n+ self.debug(\"Change properties of the prereserved machine.\")\n run_openstack(\n self.api_url,\n f'server set --property local_user={self.username} '\n@@ -289,7 +289,7 @@ class GuestMinute(tmt.Guest):\n \"\"\"\n mt_image = image\n image_lower = image.lower().strip()\n- self.debug(\"Obtaining 1MT images\")\n+ self.debug(\"Check for available 1MT images.\")\n _, images = run_openstack(\n self.api_url, 'image list -f value -c Name', True)\n images = images.splitlines()\n@@ -349,7 +349,7 @@ class GuestMinute(tmt.Guest):\n \"All 
attempts to provision a machine with 1minutetip failed.\")\n \n def delete(self):\n- self.debug(f\"Removing the minute instance {self.instance_name}\")\n+ self.debug(f\"Remove the minute instance '{self.instance_name}'.\")\n run_openstack(self.api_url, f'server delete {self.instance_name}')\n \n def remove(self):\n"
}
] |
c82819fb89809c4cc165b8a05dd54a47d6be0fb1 | teemtee/tmt | 15.02.2021 12:01:35 | MIT License | Adjust the default branch handling for libraries
Detect the default branch and store it after cloning the repo.
This seems to make the implementation a bit more readable. | [
{
"change_type": "MODIFY",
"old_path": "tests/unit/test_beakerlib.py",
"new_path": "tests/unit/test_beakerlib.py",
"diff": "@@ -18,26 +18,26 @@ def test_library():\n assert library.format == 'rpm'\n assert library.repo == 'openssl'\n assert library.url == 'https://github.com/beakerlib/openssl'\n- assert library.ref == 'master' # default branch is called master\n+ assert library.ref == 'master' # The default branch is master\n assert library.dest == tmt.beakerlib.DEFAULT_DESTINATION\n shutil.rmtree(library.parent.workdir)\n \n \n @pytest.mark.web\n-def test_library_from_fmf():\n+@pytest.mark.parametrize(\n+ 'url, name, default_branch', [\n+ ('https://github.com/beakerlib/httpd', '/http', 'master'),\n+ ('https://github.com/beakerlib/example', '/file', 'main')\n+ ])\n+def test_library_from_fmf(url, name, default_branch):\n \"\"\" Fetch beakerlib library referenced by fmf identifier \"\"\"\n- library = tmt.beakerlib.Library(\n- {\n- 'url': 'https://github.com/beakerlib/httpd',\n- 'name': '/http'\n- }\n- )\n+ library = tmt.beakerlib.Library(dict(url=url, name=name))\n assert library.format == 'fmf'\n- assert library.ref == 'master' # default branch is called master\n- assert library.url == 'https://github.com/beakerlib/httpd'\n+ assert library.ref == default_branch\n+ assert library.url == url\n assert library.dest == tmt.beakerlib.DEFAULT_DESTINATION\n- assert library.repo == 'httpd'\n- assert library.name == '/http'\n+ assert library.repo == url.split('/')[-1]\n+ assert library.name == name\n shutil.rmtree(library.parent.workdir)\n \n \n@@ -61,7 +61,7 @@ def test_dependencies():\n assert libraries[0].repo == 'httpd'\n assert libraries[0].name == '/http'\n assert libraries[0].url == 'https://github.com/beakerlib/httpd'\n- assert libraries[0].ref == 'master' # default branch is called master\n+ assert libraries[0].ref == 'master' # The default branch is master\n assert libraries[0].dest == tmt.beakerlib.DEFAULT_DESTINATION\n assert libraries[1].repo == 'openssl'\n assert libraries[1].name == '/certgen'\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/beakerlib.py",
"new_path": "tmt/beakerlib.py",
"diff": "@@ -2,7 +2,6 @@\n \n import re\n import os\n-import shutil\n \n import fmf\n import tmt\n@@ -52,6 +51,9 @@ class Library(object):\n # Use an empty common class if parent not provided (for logging, cache)\n self.parent = parent or tmt.utils.Common(workdir=True)\n \n+ # Default branch is detected from the origin after cloning\n+ self.default_branch = None\n+\n # The 'library(repo/lib)' format\n if isinstance(identifier, str):\n identifier = identifier.strip()\n@@ -62,8 +64,7 @@ class Library(object):\n self.format = 'rpm'\n self.repo, self.name = matched.groups()\n self.url = os.path.join(DEFAULT_REPOSITORY, self.repo)\n- self.ref = None # final value\n- self.__ref = None # original value\n+ self.ref = None\n self.dest = DEFAULT_DESTINATION\n \n # The fmf identifier\n@@ -71,8 +72,7 @@ class Library(object):\n self.parent.debug(f\"Detected library '{identifier}'.\", level=3)\n self.format = 'fmf'\n self.url = identifier.get('url')\n- self.ref = identifier.get('ref', None) # final value\n- self.__ref = None # original value\n+ self.ref = identifier.get('ref', None)\n self.dest = identifier.get(\n 'destination', DEFAULT_DESTINATION).lstrip('/')\n self.name = identifier.get('name', '/')\n@@ -110,18 +110,19 @@ class Library(object):\n # Check if the library was already fetched\n try:\n library = self.parent._library_cache[self.repo]\n+ # The url must be identical\n if library.url != self.url:\n raise tmt.utils.GeneralError(\n f\"Library '{self.repo}' with url '{self.url}' conflicts \"\n f\"with already fetched library from '{library.url}'.\")\n- if library.__ref != self.__ref:\n- # .__ref can be None, indicating we want default branch\n- # .ref is always a brach/commit/tag string\n- lib_ref = library.__ref if library.__ref else '<default branch>'\n- self_ref = self.__ref if self.__ref else '<default branch>'\n+ # Use the default branch if no ref provided\n+ if self.ref is None:\n+ self.ref = library.default_branch\n+ # The same ref has to be used\n+ if library.ref != self.ref:\n raise tmt.utils.GeneralError(\n- f\"Library '{self.repo}' using ref '{self_ref}' conflicts \"\n- f\"with already fetched library using ref '{lib_ref}'.\")\n+ f\"Library '{self.repo}' using ref '{self.ref}' conflicts \"\n+ f\"with already fetched library using ref '{library.ref}'.\")\n self.parent.debug(f\"Library '{self}' already fetched.\", level=3)\n # Reuse the existing metadata tree\n self.tree = library.tree\n@@ -129,17 +130,20 @@ class Library(object):\n except KeyError:\n self.parent.debug(f\"Fetch library '{self}'.\", level=3)\n # Prepare path, clone the repository, checkout ref\n- directory = os.path.join(\n- self.parent.workdir, self.dest, self.repo)\n+ directory = os.path.join(self.parent.workdir, self.dest, self.repo)\n # Clone repo with disabled prompt to ignore missing/private repos\n try:\n self.parent.run(\n ['git', 'clone', self.url, directory],\n shell=False, env={\"GIT_ASKPASS\": \"echo\"})\n- # Store the default_branch\n+ # Detect the default branch from the origin\n+ # The ref format is 'ref: refs/remotes/origin/master'\n head = os.path.join(directory, '.git/refs/remotes/origin/HEAD')\n- default = os.path.join(directory, '.git/refs/heads/__DEFAULT__')\n- shutil.copyfile(head, default)\n+ with open(head) as ref:\n+ self.default_branch = ref.read().strip().split('/')[-1]\n+ # Use the default branch if no ref provided\n+ if self.ref is None:\n+ self.ref = self.default_branch\n except tmt.utils.RunError as error:\n # Fallback to install during the prepare step if in rpm format\n if self.format == 
'rpm':\n@@ -148,13 +152,8 @@ class Library(object):\n self.parent.fail(\n f\"Failed to fetch library '{self}' from '{self.url}'.\")\n raise\n- # Check out the requested branch (sets real name of default branch)\n+ # Check out the requested branch\n try:\n- # wants default branch -> replace with the name of real default branch\n- if self.ref is None:\n- with open(os.path.join(directory, '.git/refs/heads/__DEFAULT__')) as f_head:\n- # content should be `ref: refs/remotes/origin/master`\n- self.ref = f_head.read().strip().split('/')[-1]\n self.parent.run(\n ['git', 'checkout', self.ref], shell=False, cwd=directory)\n except tmt.utils.RunError as error:\n"
}
] |
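The key trick in the commit above is reading the `HEAD` reference that git records for the origin right after cloning. A minimal standalone sketch of that step, with an illustrative helper name:

```python
import os


def detect_default_branch(repo_directory):
    """Read the default branch name recorded by 'git clone'.

    The file holds a single line such as 'ref: refs/remotes/origin/main',
    so the branch name is simply the last path component.
    """
    head = os.path.join(repo_directory, '.git/refs/remotes/origin/HEAD')
    with open(head) as ref:
        return ref.read().strip().split('/')[-1]


# Usage after a clone, for example:
#   git clone https://github.com/beakerlib/example /tmp/example
#   detect_default_branch('/tmp/example')  # -> 'main'
```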
c9605aea40808f5b0bf00bbbd0ec679c09b760ac | teemtee/tmt | 11.02.2022 15:10:31 | MIT License | Implement a generic `requires` for all plugins
Add support to all supported plugins for gathering packages required
on the guest, allowing easier future extension if needed. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/__init__.py",
"new_path": "tmt/steps/__init__.py",
"diff": "@@ -421,6 +421,10 @@ class Plugin(tmt.utils.Common, metaclass=PluginIndex):\n # Include order in verbose mode\n self.verbose('order', self.order, 'magenta', level=3)\n \n+ def requires(self):\n+ \"\"\" List of packages required by the plugin on the guest \"\"\"\n+ return []\n+\n \n class Action(tmt.utils.Common):\n \"\"\" A special action performed during a normal step. \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/finish/__init__.py",
"new_path": "tmt/steps/finish/__init__.py",
"diff": "@@ -86,6 +86,19 @@ class Finish(tmt.steps.Step):\n self.status('done')\n self.save()\n \n+ def requires(self):\n+ \"\"\"\n+ Packages required by all enabled finish plugins\n+\n+ Return a list of packages which need to be installed on the\n+ provisioned guest so that the finishing tasks work well.\n+ Used by the prepare step.\n+ \"\"\"\n+ requires = set()\n+ for plugin in self.plugins(classes=FinishPlugin):\n+ requires.update(plugin.requires())\n+ return list(requires)\n+\n \n class FinishPlugin(tmt.steps.Plugin):\n \"\"\" Common parent of finish plugins \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/prepare/__init__.py",
"new_path": "tmt/steps/prepare/__init__.py",
"diff": "@@ -87,7 +87,10 @@ class Prepare(tmt.steps.Step):\n requires = set(\n self.plan.discover.requires() +\n self.plan.provision.requires() +\n- self.plan.execute.requires()\n+ self.plan.prepare.requires() +\n+ self.plan.execute.requires() +\n+ self.plan.report.requires() +\n+ self.plan.finish.requires()\n )\n \n if requires:\n@@ -147,6 +150,19 @@ class Prepare(tmt.steps.Step):\n self.status('done')\n self.save()\n \n+ def requires(self):\n+ \"\"\"\n+ Packages required by all enabled prepare plugins\n+\n+ Return a list of packages which need to be installed on the\n+ provisioned guest so that the preparation tasks work well.\n+ Used by the prepare step.\n+ \"\"\"\n+ requires = set()\n+ for plugin in self.plugins(classes=PreparePlugin):\n+ requires.update(plugin.requires())\n+ return list(requires)\n+\n \n class PreparePlugin(tmt.steps.Plugin):\n \"\"\" Common parent of prepare plugins \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/local.py",
"new_path": "tmt/steps/provision/local.py",
"diff": "@@ -76,8 +76,3 @@ class GuestLocal(tmt.Guest):\n \n def pull(self, source=None, destination=None, options=None):\n \"\"\" Nothing to be done to pull workdir \"\"\"\n-\n- @classmethod\n- def requires(cls):\n- \"\"\" No packages needed to sync workdir \"\"\"\n- return []\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/podman.py",
"new_path": "tmt/steps/provision/podman.py",
"diff": "@@ -218,8 +218,3 @@ class GuestContainer(tmt.Guest):\n if self.container:\n self.podman(['container', 'rm', '-f', self.container])\n self.info('container', 'removed', 'green')\n-\n- @classmethod\n- def requires(cls):\n- \"\"\" No packages needed to sync workdir to the container \"\"\"\n- return []\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/report/__init__.py",
"new_path": "tmt/steps/report/__init__.py",
"diff": "@@ -60,6 +60,19 @@ class Report(tmt.steps.Step):\n self.status('done')\n self.save()\n \n+ def requires(self):\n+ \"\"\"\n+ Packages required by all enabled report plugins\n+\n+ Return a list of packages which need to be installed on the\n+ provisioned guest so that the full report can be successfully\n+ generated. Used by the prepare step.\n+ \"\"\"\n+ requires = set()\n+ for plugin in self.plugins(classes=ReportPlugin):\n+ requires.update(plugin.requires())\n+ return list(requires)\n+\n \n class ReportPlugin(tmt.steps.Plugin):\n \"\"\" Common parent of report plugins \"\"\"\n"
}
] |
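Each step touched by the commit above aggregates `requires()` from its enabled plugins in the same way. A simplified, self-contained sketch of that pattern follows; the plugin classes and package names here are made up for illustration.

```python
class Plugin:
    """Base plugin: no extra packages needed by default."""
    def requires(self):
        return []


class RequiresAnsible(Plugin):
    def requires(self):
        return ['ansible-core']


class RequiresRsync(Plugin):
    def requires(self):
        return ['rsync']


def step_requires(plugins):
    """Union of packages required by all given plugins, deduplicated."""
    requires = set()
    for plugin in plugins:
        requires.update(plugin.requires())
    return list(requires)


print(sorted(step_requires([RequiresAnsible(), RequiresRsync(), Plugin()])))
# ['ansible-core', 'rsync']
```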
4dc7a314bb39c63b2f0e822d248bb24bed129c1d | teemtee/tmt | 12.04.2022 14:48:46 | MIT License | Add a nice provisioning progress to Artemis plugin
Artemis plugin now displays the sequence of guest states
as its provisioning progresses. This has been implemented as
a reusable primitive context manager, coupled with a small
refactoring of code taking care of indentation. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/execute/internal.py",
"new_path": "tmt/steps/execute/internal.py",
"diff": "@@ -106,6 +106,8 @@ class ExecuteInternal(tmt.steps.execute.ExecutePlugin):\n # Make sure that script is a list\n tmt.utils.listify(self.data, keys=['script'])\n \n+ # TODO: consider switching to utils.updatable_message() - might need more\n+ # work, since use of _show_progress is split over several methods.\n def _show_progress(self, progress, test_name, finish=False):\n \"\"\"\n Show an interactive progress bar in non-verbose mode.\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/artemis.py",
"new_path": "tmt/steps/provision/artemis.py",
"diff": "@@ -6,7 +6,7 @@ import click\n import requests\n \n import tmt\n-from tmt.utils import ProvisionError\n+from tmt.utils import ProvisionError, updatable_message\n \n # TODO: find out how to get this one into RPM builds.\n try:\n@@ -91,6 +91,17 @@ DEFAULT_GUEST_DATA = cast(\n }\n )\n \n+GUEST_STATE_COLOR_DEFAULT = 'green'\n+\n+GUEST_STATE_COLORS = {\n+ 'routing': 'yellow',\n+ 'provisioning': 'magenta',\n+ 'promised': 'blue',\n+ 'preparing': 'cyan',\n+ 'cancelled': 'red',\n+ 'error': 'red'\n+}\n+\n \n # Type annotation for Artemis API `GET /guests/$guestname` response.\n # Partial, not all fields necessary since plugin ignores most of them.\n@@ -577,31 +588,39 @@ class GuestArtemis(tmt.Guest):\n deadline = datetime.datetime.utcnow(\n ) + datetime.timedelta(seconds=self.provision_timeout)\n \n- while deadline > datetime.datetime.utcnow():\n- response = self.api.inspect(f'/guests/{self.guestname}')\n+ with updatable_message(\n+ 'state', indent_level=self._level()) as progress_message:\n+ while deadline > datetime.datetime.utcnow():\n+ response = self.api.inspect(f'/guests/{self.guestname}')\n \n- if response.status_code != 200:\n- raise ProvisionError(\n- f\"Failed to create, \"\n- f\"unhandled API response '{response.status_code}'.\")\n+ if response.status_code != 200:\n+ raise ProvisionError(\n+ f\"Failed to create, \"\n+ f\"unhandled API response '{response.status_code}'.\")\n \n- current = cast(GuestInspectType, response.json())\n- state = current['state']\n+ current = cast(GuestInspectType, response.json())\n+ state = current['state']\n+ state_color = GUEST_STATE_COLORS.get(\n+ state, GUEST_STATE_COLOR_DEFAULT)\n \n- if state == 'error':\n- raise ProvisionError(f'Failed to create, provisioning failed.')\n+ progress_message.update(state, color=state_color)\n \n- if state == 'ready':\n- self.guest = current['address']\n- self.info('address', self.guest, 'green')\n- break\n+ if state == 'error':\n+ raise ProvisionError(\n+ f'Failed to create, provisioning failed.')\n \n- time.sleep(self.provision_tick)\n+ if state == 'ready':\n+ break\n \n- else:\n- raise ProvisionError(\n- f'Failed to provision in the given amount '\n- f'of time (--provision-timeout={self.provision_timeout}).')\n+ time.sleep(self.provision_tick)\n+\n+ else:\n+ raise ProvisionError(\n+ f'Failed to provision in the given amount '\n+ f'of time (--provision-timeout={self.provision_timeout}).')\n+\n+ self.guest = current['address']\n+ self.info('address', self.guest, 'green')\n \n def start(self):\n \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/utils.py",
"new_path": "tmt/utils.py",
"diff": "@@ -11,12 +11,13 @@ import re\n import shlex\n import shutil\n import subprocess\n+import sys\n import unicodedata\n from collections import OrderedDict\n from functools import lru_cache\n from pathlib import Path\n from threading import Thread\n-from typing import Dict, Iterable\n+from typing import Dict, Iterable, Optional\n \n import fmf\n import requests\n@@ -63,6 +64,49 @@ DEFAULT_SELECT_TIMEOUT = 5\n SHELL_OPTIONS = 'set -eo pipefail'\n \n \n+def indent(\n+ key: str,\n+ value: Optional[str] = None,\n+ color: Optional[str] = None,\n+ level: int = 0) -> str:\n+ \"\"\"\n+ Indent a key/value message.\n+\n+ If both ``key`` and ``value`` are specified, ``{key}: {value}``\n+ message is rendered. Otherwise, just ``key`` is used alone. If\n+ ``value`` contains multiple lines, each but the very first line is\n+ indented by one extra level.\n+\n+ :param value: optional value to print at right side of ``key``.\n+ :param color: optional color to apply on ``key``.\n+ :param level: number of indentation levels. Each level is indented\n+ by :py:data:`INDENT` spaces.\n+ \"\"\"\n+\n+ indent = ' ' * INDENT * level\n+ deeper = ' ' * INDENT * (level + 1)\n+\n+ # Colorize\n+ if color is not None:\n+ key = style(key, fg=color)\n+\n+ # Handle key only\n+ if value is None:\n+ message = key\n+\n+ # Handle key + value\n+ else:\n+ # Multiline content indented deeper\n+ if isinstance(value, str):\n+ lines = value.splitlines()\n+ if len(lines) > 1:\n+ value = ''.join([f\"\\n{deeper}{line}\" for line in lines])\n+\n+ message = f'{key}: {value}'\n+\n+ return indent + message\n+\n+\n class Config(object):\n \"\"\" User configuration \"\"\"\n \n@@ -243,24 +287,12 @@ class Common(object):\n \n def _indent(self, key, value=None, color=None, shift=0):\n \"\"\" Indent message according to the object hierarchy \"\"\"\n- level = self._level() + shift\n- indent = ' ' * INDENT * level\n- deeper = ' ' * INDENT * (level + 1)\n- # Colorize\n- if color is not None:\n- key = style(key, fg=color)\n- # Handle key only\n- if value is None:\n- message = key\n- # Handle key + value\n- else:\n- # Multiline content indented deeper\n- if isinstance(value, str):\n- lines = value.splitlines()\n- if len(lines) > 1:\n- value = ''.join([f\"\\n{deeper}{line}\" for line in lines])\n- message = f'{key}: {value}'\n- return indent + message\n+\n+ return indent(\n+ key,\n+ value=value,\n+ color=color,\n+ level=self._level() + shift)\n \n def _log(self, message):\n \"\"\" Append provided message to the current log \"\"\"\n@@ -1780,3 +1812,78 @@ def get_distgit_handler(remotes=None, usage_name=None):\n def get_distgit_handler_names():\n \"\"\" All known distgit handlers \"\"\"\n return [i.usage_name for i in DistGitHandler.__subclasses__()]\n+\n+\n+class updatable_message(contextlib.AbstractContextManager):\n+ \"\"\" Updatable message suitable for progress-bar-like reporting \"\"\"\n+\n+ def __init__(\n+ self,\n+ key: str,\n+ enabled: bool = True,\n+ indent_level: int = 0,\n+ key_color: Optional[str] = None,\n+ default_value_color: Optional[str] = None\n+ ) -> None:\n+ \"\"\"\n+ Updatable message suitable for progress-bar-like reporting.\n+\n+ .. 
code:block:: python3\n+\n+ with updatable_message('foo') as message:\n+ while ...:\n+ ...\n+\n+ # check state of remote request, and update message\n+ state = remote_api.check()\n+ message.update(state)\n+\n+ :param key: a string to use as the left-hand part of logged message.\n+ :param enabled: if unset, no output would be performed.\n+ :param indent_level: desired indentation level.\n+ :param key_color: optional color to apply to ``key``.\n+ :param default_color: optional color to apply to value when\n+ :py:meth:`update` is called with ``color`` left out.\n+ \"\"\"\n+\n+ self.key = key\n+ self.enabled = enabled\n+ self.indent_level = indent_level\n+ self.key_color = key_color\n+ self.default_value_color = default_value_color\n+\n+ # No progress if terminal not attached\n+ if not sys.stdout.isatty():\n+ self.enabled = False\n+\n+ self._previous_line = None\n+\n+ def __enter__(self) -> 'updatable_message':\n+ return self\n+\n+ def __exit__(self, *args):\n+ sys.stdout.write('\\n')\n+ sys.stdout.flush()\n+\n+ def update(self, value, color=None) -> None:\n+ if not self.enabled:\n+ return\n+\n+ if self._previous_line is not None:\n+ message = value.ljust(len(self._previous_line))\n+\n+ else:\n+ message = value\n+\n+ self._previous_line = value\n+\n+ message = indent(\n+ self.key,\n+ value=style(\n+ message,\n+ fg=color or self.default_value_color),\n+ color=self.key_color,\n+ level=self.indent_level)\n+\n+ sys.stdout.write(f\"\\r{message}\")\n+ sys.stdout.flush()\n"
}
] |
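The diff in the record above introduces `indent()` and an `updatable_message` context manager for progress-bar-like reporting. Below is a minimal, self-contained sketch of that reporting pattern — rewrite the current terminal line with `\r` and pad with the previous value so shorter updates erase leftovers. The state names and the sleep loop are invented for illustration and are not part of tmt.

```python
import sys
import time


def report_progress(states):
    """Rewrite a single terminal line as the reported state changes."""
    previous = ''
    for state in states:
        # Pad with spaces so leftovers of a longer previous value get erased
        message = state.ljust(len(previous))
        previous = state
        sys.stdout.write(f"\rguest state: {message}")
        sys.stdout.flush()
        time.sleep(0.2)
    sys.stdout.write('\n')


if __name__ == '__main__':
    report_progress(['pending', 'provisioning', 'promised', 'ready'])
```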
211b9e86f98414dcbf0f6a88608e9dd08cd79c1b | teemtee/tmt | 18.06.2022 17:04:39 | MIT License | Enhance tmt.utils.retry_session with timeout support
`retry_session` is now usable as a context manager, and accepts a
timeout to honor for each HTTP request.
The previous `retry_session` overlapped with similar functionality in the
`artemis` plugin, so the two were merged, and context-manager support was added
because `retry_session` was used exclusively in that manner. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/discover/__init__.py",
"new_path": "tmt/steps/discover/__init__.py",
"diff": "@@ -273,8 +273,8 @@ class DiscoverPlugin(tmt.steps.Plugin):\n if handler.re_ignore_extensions.search(source_name):\n continue\n self.debug(f\"Download sources from '{url}'.\")\n- session = tmt.utils.retry_session()\n- response = session.get(url)\n+ with tmt.utils.retry_session() as session:\n+ response = session.get(url)\n response.raise_for_status()\n os.makedirs(target_dir, exist_ok=True)\n with open(os.path.join(target_dir, source_name), 'wb') as tarball:\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/artemis.py",
"new_path": "tmt/steps/provision/artemis.py",
"diff": "@@ -5,11 +5,10 @@ from typing import Any, Dict, List, Optional, cast\n \n import click\n import requests\n-from requests.packages.urllib3.util.retry import Retry as RequestsRetry\n \n import tmt\n import tmt.steps.provision\n-from tmt.utils import ProvisionError, updatable_message\n+from tmt.utils import ProvisionError, retry_session, updatable_message\n \n if sys.version_info >= (3, 8):\n from typing import TypedDict\n@@ -118,71 +117,22 @@ GuestInspectType = TypedDict(\n )\n \n \n-class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):\n- \"\"\"\n- Spice up request's session with custom timeout.\n- \"\"\"\n-\n- def __init__(self, *args: Any, **kwargs: Any) -> None:\n- self.timeout = kwargs.pop('timeout', DEFAULT_API_TIMEOUT)\n-\n- super().__init__(*args, **kwargs)\n-\n- def send( # type: ignore # does not match superclass type on purpose\n- self,\n- request: requests.PreparedRequest,\n- **kwargs: Any) -> requests.Response:\n- kwargs.setdefault('timeout', self.timeout)\n-\n- return super().send(request, **kwargs)\n-\n-\n class ArtemisAPI:\n- def install_http_retries(\n- self,\n- timeout: int,\n- retries: int,\n- retry_backoff_factor: int\n- ) -> None:\n- \"\"\"\n- Install custom \"retry strategy\" and timeout to our HTTP session.\n-\n- Strategy and timeout work together, \"consuming\" the timeout as\n- specified by the strategy.\n- \"\"\"\n+ def __init__(self, guest: 'GuestArtemis') -> None:\n+ self._guest = guest\n \n- retry_strategy = RequestsRetry(\n- total=retries,\n- status_forcelist=[\n+ self.http_session = retry_session.create(\n+ retries=guest.api_retries,\n+ backoff_factor=guest.api_retry_backoff_factor,\n+ allowed_methods=('HEAD', 'GET', 'POST', 'DELETE', 'PUT'),\n+ status_forcelist=(\n 429, # Too Many Requests\n 500, # Internal Server Error\n 502, # Bad Gateway\n 503, # Service Unavailable\n 504 # Gateway Timeout\n- ],\n- method_whitelist=[\n- 'HEAD', 'GET', 'POST', 'DELETE', 'PUT'\n- ],\n- backoff_factor=retry_backoff_factor\n- )\n-\n- timeout_adapter = TimeoutHTTPAdapter(\n- timeout=timeout,\n- max_retries=retry_strategy\n- )\n-\n- self.http_session.mount('https://', timeout_adapter)\n- self.http_session.mount('http://', timeout_adapter)\n-\n- def __init__(self, guest: 'GuestArtemis') -> None:\n- self._guest = guest\n-\n- self.http_session = requests.Session()\n-\n- self.install_http_retries(\n- timeout=guest.api_timeout,\n- retries=guest.api_retries,\n- retry_backoff_factor=guest.api_retry_backoff_factor\n+ ),\n+ timeout=guest.api_timeout\n )\n \n def query(\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -310,7 +310,8 @@ class GuestTestcloud(tmt.GuestSsh):\n wait = 1\n while True:\n try:\n- response = retry_session().get(url)\n+ with retry_session() as session:\n+ response = session.get(url)\n if response.ok:\n return response\n except requests.RequestException:\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/utils.py",
"new_path": "tmt/utils.py",
"diff": "@@ -25,8 +25,8 @@ from typing import (IO, TYPE_CHECKING, Any, Dict, Generator, Iterable, List,\n import click\n import fmf\n import requests\n+import requests.adapters\n from click import echo, style, wrap_text\n-from requests.adapters import HTTPAdapter\n from requests.packages.urllib3.util.retry import Retry\n from ruamel.yaml import YAML, scalarstring\n from ruamel.yaml.comments import CommentedMap\n@@ -77,6 +77,10 @@ DEFAULT_SELECT_TIMEOUT = 5\n # Shell options to be set for all run shell scripts\n SHELL_OPTIONS = 'set -eo pipefail'\n \n+# Defaults for HTTP/HTTPS retries and timeouts (see `retry_session()`).\n+DEFAULT_RETRY_SESSION_RETRIES: int = 3\n+DEFAULT_RETRY_SESSION_BACKOFF_FACTOR: float = 0.1\n+\n # A stand-in variable for generic use.\n T = TypeVar('T')\n \n@@ -1559,30 +1563,86 @@ def public_git_url(url: str) -> str:\n return url\n \n \n-def retry_session(\n- retries: int = 3,\n- backoff_factor: float = 0.1,\n- method_whitelist: bool = False,\n- status_forcelist: Tuple[int, ...] = (429, 500, 502, 503, 504)\n- ) -> requests.Session:\n+class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):\n+ \"\"\"\n+ Spice up request's session with custom timeout.\n \"\"\"\n- Create a requests.Session() that retries on request failure.\n \n- 'method_whitelist' is set to False to retry on all http request methods\n- by default.\n+ def __init__(self, *args: Any, **kwargs: Any) -> None:\n+ self.timeout = kwargs.pop('timeout', None)\n+\n+ super().__init__(*args, **kwargs)\n+\n+ def send( # type: ignore # does not match superclass type on purpose\n+ self,\n+ request: requests.PreparedRequest,\n+ **kwargs: Any) -> requests.Response:\n+ kwargs.setdefault('timeout', self.timeout)\n+\n+ return super().send(request, **kwargs)\n+\n+\n+class retry_session(contextlib.AbstractContextManager): # type: ignore\n+ \"\"\"\n+ Context manager for requests.Session() with retries and timeout\n \"\"\"\n- session = requests.Session()\n- retry = Retry(\n- total=retries,\n- backoff_factor=backoff_factor,\n- status_forcelist=status_forcelist,\n- method_whitelist=method_whitelist,\n- raise_on_status=False,\n- )\n- adapter = HTTPAdapter(max_retries=retry)\n- session.mount('http://', adapter)\n- session.mount('https://', adapter)\n- return session\n+ @staticmethod\n+ def create(\n+ retries: int = DEFAULT_RETRY_SESSION_RETRIES,\n+ backoff_factor: float = DEFAULT_RETRY_SESSION_BACKOFF_FACTOR,\n+ allowed_methods: Optional[Tuple[str, ...]] = None,\n+ status_forcelist: Optional[Tuple[int, ...]] = None,\n+ timeout: Optional[int] = None\n+ ) -> requests.Session:\n+ retry_strategy = Retry(\n+ total=retries,\n+ status_forcelist=status_forcelist,\n+ # `method_whitelist`` has been renamed to `allowed_methods` since\n+ # urllib3 1.26, and it will be removed in urllib3 2.0.\n+ # `allowed_methods` is therefore the future-proof name, but for the\n+ # sake of backward compatibility, internally we need to use the\n+ # deprecated parameter for now. 
Or request newer urllib3, but that\n+ # might a problem because of RPM availability.\n+ method_whitelist=allowed_methods,\n+ backoff_factor=backoff_factor)\n+\n+ if timeout is not None:\n+ http_adapter: requests.adapters.HTTPAdapter = TimeoutHTTPAdapter(\n+ timeout=timeout, max_retries=retry_strategy)\n+ else:\n+ http_adapter = requests.adapters.HTTPAdapter(\n+ max_retries=retry_strategy)\n+\n+ session = requests.Session()\n+ session.mount('http://', http_adapter)\n+ session.mount('https://', http_adapter)\n+\n+ return session\n+\n+ def __init__(\n+ self,\n+ retries: int = DEFAULT_RETRY_SESSION_RETRIES,\n+ backoff_factor: float = DEFAULT_RETRY_SESSION_BACKOFF_FACTOR,\n+ allowed_methods: Optional[Tuple[str, ...]] = None,\n+ status_forcelist: Optional[Tuple[int, ...]] = None,\n+ timeout: Optional[int] = None\n+ ) -> None:\n+ self.retries = retries\n+ self.backoff_factor = backoff_factor\n+ self.allowed_methods = allowed_methods\n+ self.status_forcelist = status_forcelist\n+ self.timeout = timeout\n+\n+ def __enter__(self) -> requests.Session:\n+ return self.create(\n+ retries=self.retries,\n+ backoff_factor=self.backoff_factor,\n+ allowed_methods=self.allowed_methods,\n+ status_forcelist=self.status_forcelist,\n+ timeout=self.timeout)\n+\n+ def __exit__(self, *args: Any) -> None:\n+ pass\n \n \n def remove_color(text: str) -> str:\n"
}
] |
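The commit above turns `retry_session` into a context manager with retry and timeout support. The following standalone sketch shows the same pattern, assuming only `requests` and `urllib3`; the helper name and parameter defaults are illustrative, not tmt's.

```python
import contextlib

import requests
import requests.adapters
from urllib3.util.retry import Retry


class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):
    """HTTPAdapter that applies a default timeout to every request."""

    def __init__(self, *args, timeout=None, **kwargs):
        self.timeout = timeout
        super().__init__(*args, **kwargs)

    def send(self, request, **kwargs):
        kwargs.setdefault('timeout', self.timeout)
        return super().send(request, **kwargs)


@contextlib.contextmanager
def retrying_session(retries=3, backoff_factor=0.1, timeout=10):
    """Yield a requests.Session with retries and a default per-request timeout."""
    adapter = TimeoutHTTPAdapter(
        timeout=timeout,
        max_retries=Retry(total=retries, backoff_factor=backoff_factor))
    session = requests.Session()
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    try:
        yield session
    finally:
        session.close()


# Usage mirrors the call sites changed in the diff:
# with retrying_session(timeout=30) as session:
#     response = session.get('https://example.com/sources.tar.gz')
#     response.raise_for_status()
```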
e9b37078d9a877a4ad5de424e82ccfc67dae61ca | teemtee/tmt | 10.06.2022 09:16:57 | MIT License | Convert guest implementations to use serializable container for load/save
Instead of custom load/save methods, use those based on dataclasses. We
gain type annotations, automagic conversion from/to YAML, and no need for
explicit moves of fields between instances and serialized data. | [
{
"change_type": "MODIFY",
"old_path": "tests/multihost/provision/test.sh",
"new_path": "tests/multihost/provision/test.sh",
"diff": "@@ -25,7 +25,7 @@ rlJournalStart\n \n # 2 guests without role are saved\n guests=\"$run/noroles/provision/guests.yaml\"\n- rlAssertNotGrep \"role\" $guests\n+ rlAssertNotGrep \"role: [a-z]\" $guests \"File '$guests' should leave role unspecified\"\n rlAssertGrep \"client\" $guests\n rlAssertGrep \"server\" $guests\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/__init__.py",
"new_path": "tmt/steps/provision/__init__.py",
"diff": "@@ -1,4 +1,5 @@\n import collections\n+import dataclasses\n import datetime\n import os\n import random\n@@ -8,12 +9,13 @@ import string\n import subprocess\n import tempfile\n import time\n-from typing import Optional, Type\n+from typing import Dict, List, Optional, Type\n \n import click\n import fmf\n \n import tmt\n+import tmt.plugins\n import tmt.utils\n \n # Timeout in seconds of waiting for a connection after reboot\n@@ -50,8 +52,8 @@ class Provision(tmt.steps.Step):\n f\"Provision step names must be unique for multihost testing. \"\n f\"Duplicate names: {duplicate_string} in plan '{plan.name}'.\")\n # List of provisioned guests and loaded guest data\n- self._guests = []\n- self._guest_data = {}\n+ self._guests: List['Guest'] = []\n+ self._guest_data: Dict[str, 'GuestData'] = {}\n self.is_multihost = False\n \n def load(self, extra_keys=None):\n@@ -59,7 +61,13 @@ class Provision(tmt.steps.Step):\n extra_keys = extra_keys or []\n super().load(extra_keys)\n try:\n- self._guest_data = tmt.utils.yaml_to_dict(self.read('guests.yaml'))\n+ raw_guest_data = tmt.utils.yaml_to_dict(self.read('guests.yaml'))\n+\n+ self._guest_data = {\n+ name: tmt.utils.SerializableContainer.unserialize(guest_data)\n+ for name, guest_data in raw_guest_data.items()\n+ }\n+\n except tmt.utils.FileError:\n self.debug('Provisioned guests not found.', level=2)\n \n@@ -68,9 +76,10 @@ class Provision(tmt.steps.Step):\n data = data or {}\n super().save(data)\n try:\n- guests = dict(\n- [(guest.name, guest.save()) for guest in self.guests()])\n- self.write('guests.yaml', tmt.utils.dict_to_yaml(guests))\n+ raw_guest_data = {guest.name: guest.save().to_serialized()\n+ for guest in self.guests()}\n+\n+ self.write('guests.yaml', tmt.utils.dict_to_yaml(raw_guest_data))\n except tmt.utils.FileError:\n self.debug('Failed to save provisioned guests.')\n \n@@ -154,7 +163,7 @@ class Provision(tmt.steps.Step):\n if save:\n self.save()\n \n- def guests(self):\n+ def guests(self) -> List['Guest']:\n \"\"\" Return the list of all provisioned guests \"\"\"\n return self._guests\n \n@@ -234,6 +243,20 @@ class ProvisionPlugin(tmt.steps.Plugin):\n \"\"\" Remove the images of one particular plugin \"\"\"\n \n \n+@dataclasses.dataclass\n+class GuestData(tmt.utils.SerializableContainer):\n+ \"\"\"\n+ Keys necessary to describe, create, save and restore a guest.\n+\n+ Very basic set of keys shared across all known guest classes.\n+ \"\"\"\n+\n+ # guest role in the multihost scenario\n+ role: Optional[str] = None\n+ # hostname or ip address\n+ guest: Optional[str] = None\n+\n+\n class Guest(tmt.utils.Common):\n \"\"\"\n Guest provisioned for test execution\n@@ -244,26 +267,34 @@ class Guest(tmt.utils.Common):\n to Guest subclasses to provide one working in their respective\n infrastructure.\n \n- The following keys are expected in the 'data' dictionary::\n+ The following keys are expected in the 'data' container::\n \n role ....... guest role in the multihost scenario\n guest ...... name, hostname or ip address\n \n- These are by default imported into instance attributes (see the\n- class attribute '_keys' below).\n+ These are by default imported into instance attributes.\n \"\"\"\n \n+ # Used by save() to construct the correct container for keys.\n+ _data_class = GuestData\n+\n+ role: Optional[str]\n+ guest: Optional[str]\n+\n+ # Flag to indicate localhost guest, requires special handling\n+ localhost = False\n+\n+ # TODO: do we need this list? 
Can whatever code is using it use _data_class directly?\n # List of supported keys\n # (used for import/export to/from attributes during load and save)\n- _keys = ['role', 'guest']\n+ @property\n+ def _keys(self) -> List[str]:\n+ return list(self._data_class.keys())\n \n- def __init__(self, data, name=None, parent=None):\n+ def __init__(self, data: GuestData, name=None, parent=None):\n \"\"\" Initialize guest data \"\"\"\n super().__init__(parent, name)\n- # Initialize role, it will be overridden by load() if specified\n- self.role = None\n- # Flag to indicate localhost guest, requires special handling\n- self.localhost = False\n+\n self.load(data)\n \n def _random_name(self, prefix='', length=16):\n@@ -280,7 +311,7 @@ class Guest(tmt.utils.Common):\n _, run_id = os.path.split(self.parent.plan.my_run.workdir)\n return self._random_name(prefix=\"tmt-{0}-\".format(run_id[-3:]))\n \n- def load(self, data):\n+ def load(self, data: GuestData) -> None:\n \"\"\"\n Load guest data into object attributes for easy access\n \n@@ -293,10 +324,9 @@ class Guest(tmt.utils.Common):\n line options / L2 metadata / user configuration and wake up data\n stored by the save() method below.\n \"\"\"\n- for key in self._keys:\n- setattr(self, key, data.get(key))\n+ data.inject_to(self)\n \n- def save(self):\n+ def save(self) -> GuestData:\n \"\"\"\n Save guest data for future wake up\n \n@@ -305,12 +335,7 @@ class Guest(tmt.utils.Common):\n the guest. Everything needed to attach to a running instance\n should be added into the data dictionary by child classes.\n \"\"\"\n- data = dict()\n- for key in self._keys:\n- value = getattr(self, key)\n- if value is not None:\n- data[key] = value\n- return data\n+ return self._data_class.extract_from(self)\n \n def wake(self):\n \"\"\"\n@@ -562,6 +587,26 @@ class Guest(tmt.utils.Common):\n return []\n \n \n+@dataclasses.dataclass\n+class GuestSshData(GuestData):\n+ \"\"\"\n+ Keys necessary to describe, create, save and restore a guest with SSH\n+ capability.\n+\n+ Derived from GuestData, this class adds keys relevant for guests that can be\n+ reached over SSH.\n+ \"\"\"\n+\n+ # port to connect to\n+ port: Optional[int] = None\n+ # user name to log in\n+ user: Optional[str] = None\n+ # path to the private key\n+ key: List[str] = dataclasses.field(default_factory=list)\n+ # password\n+ password: Optional[str] = None\n+\n+\n class GuestSsh(Guest):\n \"\"\"\n Guest provisioned for test execution, capable of accepting SSH connections\n@@ -575,13 +620,15 @@ class GuestSsh(Guest):\n key ........ path to the private key (str or list)\n password ... password\n \n- These are by default imported into instance attributes (see the\n- class attribute '_keys' below).\n+ These are by default imported into instance attributes.\n \"\"\"\n \n- # List of supported keys\n- # (used for import/export to/from attributes during load and save)\n- _keys = Guest._keys + ['port', 'user', 'key', 'password']\n+ _data_class = GuestSshData\n+\n+ port: Optional[int]\n+ user: Optional[str]\n+ key: List[str]\n+ password: Optional[str]\n \n # Master ssh connection process and socket path\n _ssh_master_process = None\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/artemis.py",
"new_path": "tmt/steps/provision/artemis.py",
"diff": "@@ -1,3 +1,4 @@\n+import dataclasses\n import datetime\n import sys\n import time\n@@ -36,38 +37,7 @@ SUPPORTED_API_VERSIONS = (\n # should be perfectly fine.\n DEFAULT_API_VERSION = SUPPORTED_API_VERSIONS[0]\n \n-# Type annotation for \"data\" package describing a guest instance. Passed\n-# between load() and save() calls.\n-StepStateType = TypedDict(\n- 'StepStateType',\n- {\n- # API\n- 'api-url': str,\n- 'api-version': str,\n-\n- # Guest request properties\n- 'arch': str,\n- 'image': str,\n- 'hardware': Any,\n- 'pool': Optional[str],\n- 'priority-group': str,\n- 'keyname': str,\n- 'user-data': Dict[str, str],\n-\n- # Provided by Artemis response\n- 'guestname': Optional[str],\n- 'guest': Optional[str],\n- 'user': str,\n-\n- # Timeouts and deadlines\n- 'provision-timeout': int,\n- 'provision-tick': int,\n- 'api-timeout': int,\n- 'api-retries': int,\n- 'api-retry-backoff-factor': int\n- }\n- )\n-\n+DEFAULT_API_URL = 'http://127.0.0.1:8001'\n DEFAULT_USER = 'root'\n DEFAULT_ARCH = 'x86_64'\n DEFAULT_PRIORITY_GROUP = 'default-priority'\n@@ -79,21 +49,39 @@ DEFAULT_API_RETRIES = 10\n # Should lead to delays of 0.5, 1, 2, 4, 8, 16, 32, 64, 128, 256 seconds\n DEFAULT_RETRY_BACKOFF_FACTOR = 1\n \n-DEFAULT_GUEST_DATA = cast(\n- StepStateType, {\n- 'api-version': DEFAULT_API_VERSION,\n- 'arch': DEFAULT_ARCH,\n- 'priority-group': DEFAULT_PRIORITY_GROUP,\n- 'keyname': DEFAULT_KEYNAME,\n- 'user-data': {},\n- 'user': DEFAULT_USER,\n- 'provision-timeout': DEFAULT_PROVISION_TIMEOUT,\n- 'provision-tick': DEFAULT_PROVISION_TICK,\n- 'api-timeout': DEFAULT_API_TIMEOUT,\n- 'api-retries': DEFAULT_API_RETRIES,\n- 'api-retry-backoff-factor': DEFAULT_RETRY_BACKOFF_FACTOR\n- }\n- )\n+# Type annotation for \"data\" package describing a guest instance. Passed\n+# between load() and save() calls\n+# TODO: get rid of `ignore` once superclass is no longer `Any`\n+\n+\n+@dataclasses.dataclass\n+class ArtemisGuestData(tmt.steps.provision.GuestSshData): # type: ignore[misc]\n+ # Override parent class with our defaults\n+ user: str = DEFAULT_USER\n+\n+ # API\n+ api_url: str = DEFAULT_API_URL\n+ api_version: str = DEFAULT_API_VERSION\n+\n+ # Guest request properties\n+ arch: str = DEFAULT_ARCH\n+ image: Optional[str] = None\n+ hardware: Optional[Any] = None\n+ pool: Optional[str] = None\n+ priority_group: str = DEFAULT_PRIORITY_GROUP\n+ keyname: str = DEFAULT_KEYNAME\n+ user_data: Dict[str, str] = dataclasses.field(default_factory=dict)\n+\n+ # Provided by Artemis response\n+ guestname: Optional[str] = None\n+\n+ # Timeouts and deadlines\n+ provision_timeout: int = DEFAULT_PROVISION_TIMEOUT\n+ provision_tick: int = DEFAULT_PROVISION_TICK\n+ api_timeout: int = DEFAULT_API_TIMEOUT\n+ api_retries: int = DEFAULT_API_RETRIES\n+ api_retry_backoff_factor: int = DEFAULT_RETRY_BACKOFF_FACTOR\n+\n \n GUEST_STATE_COLOR_DEFAULT = 'green'\n \n@@ -377,11 +365,11 @@ class ProvisionArtemis(\n def default(self, option: str, default: Optional[Any] = None) -> Any:\n \"\"\" Return default data for given option \"\"\"\n \n- return DEFAULT_GUEST_DATA.get(option, default)\n+ return getattr(ArtemisGuestData(), option.replace('-', '_'), default)\n \n # TODO: use better types once superclass gains its annotations\n def wake(self, keys: Optional[List[str]] = None,\n- data: Optional[StepStateType] = None) -> None:\n+ data: Optional[ArtemisGuestData] = None) -> None:\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n \n super().wake(keys=keys, data=data)\n@@ -410,25 +398,23 @@ class ProvisionArtemis(\n except ValueError:\n 
raise ProvisionError('Cannot parse user-data.')\n \n- data: StepStateType = {\n- 'api-url': self.get('api-url'),\n- 'api-version': api_version,\n- 'arch': self.get('arch'),\n- 'image': self.get('image'),\n- 'hardware': self.get('hardware'),\n- 'pool': self.get('pool'),\n- 'priority-group': self.get('priority-group'),\n- 'keyname': self.get('keyname'),\n- 'user-data': user_data,\n- 'guestname': None,\n- 'guest': None,\n- 'user': DEFAULT_USER,\n- 'provision-timeout': self.get('provision-timeout'),\n- 'provision-tick': self.get('provision-tick'),\n- 'api-timeout': self.get('api-timeout'),\n- 'api-retries': self.get('api-retries'),\n- 'api-retry-backoff-factor': self.get('api-retry-backoff-factor')\n- }\n+ data = ArtemisGuestData(\n+ api_url=self.get('api-url'),\n+ api_version=api_version,\n+ arch=self.get('arch'),\n+ image=self.get('image'),\n+ hardware=self.get('hardware'),\n+ pool=self.get('pool'),\n+ priority_group=self.get('priority-group'),\n+ keyname=self.get('keyname'),\n+ user_data=user_data,\n+ user=self.get('user'),\n+ provision_timeout=self.get('provision-timeout'),\n+ provision_tick=self.get('provision-tick'),\n+ api_timeout=self.get('api-timeout'),\n+ api_retries=self.get('api-retries'),\n+ api_retry_backoff_factor=self.get('api-retry-backoff-factor')\n+ )\n \n self._guest = GuestArtemis(data, name=self.name, parent=self.step)\n self._guest.start()\n@@ -446,6 +432,29 @@ class GuestArtemis(tmt.GuestSsh): # type: ignore[misc]\n The following keys are expected in the 'data' dictionary:\n \"\"\"\n \n+ # API\n+ api_url: str\n+ api_version: str\n+\n+ # Guest request properties\n+ arch: str\n+ image: str\n+ hardware: Optional[Any]\n+ pool: Optional[str]\n+ priority_group: str\n+ keyname: str\n+ user_data: Dict[str, str]\n+\n+ # Provided by Artemis response\n+ guestname: Optional[str]\n+\n+ # Timeouts and deadlines\n+ provision_timeout: int\n+ provision_tick: int\n+ api_timeout: int\n+ api_retries: int\n+ api_retry_backoff_factor: int\n+\n _api: Optional[ArtemisAPI] = None\n \n @property\n@@ -455,50 +464,6 @@ class GuestArtemis(tmt.GuestSsh): # type: ignore[misc]\n \n return self._api\n \n- def load(self, data: StepStateType) -> None:\n- super().load(data)\n-\n- self.api_url = data['api-url']\n- self.api_version = data['api-version']\n- self.arch = data['arch']\n- self.image = data['image']\n- self.hardware = data['hardware']\n- self.pool = data['pool']\n- self.priority_group = data['priority-group']\n- self.keyname = data['keyname']\n- self.user_data = data['user-data']\n- self.guestname = data['guestname']\n- self.guest = data['guest']\n- self.user = data['user']\n- self.provision_timeout = data['provision-timeout']\n- self.provision_tick = data['provision-tick']\n- self.api_timeout = data['api-timeout']\n- self.api_retries = data['api-retries']\n- self.api_retry_backoff_factor = data['api-retry-backoff-factor']\n-\n- def save(self) -> StepStateType:\n- data = cast(StepStateType, super().save())\n-\n- data['api-url'] = self.api_url\n- data['api-version'] = self.api_version\n- data['arch'] = self.arch\n- data['image'] = self.image\n- data['hardware'] = self.hardware\n- data['pool'] = self.pool\n- data['priority-group'] = self.priority_group\n- data['keyname'] = self.keyname\n- data['user-data'] = self.user_data\n- data['guestname'] = self.guestname\n- data['guest'] = self.guest\n- data['user'] = self.user\n- data['provision-timeout'] = self.provision_timeout\n- data['provision-tick'] = self.provision_tick\n- data['api-timeout'] = self.api_timeout\n- data['api-retries'] = 
self.api_retries\n- data['api-retry-backoff-factor'] = self.api_retry_backoff_factor\n-\n- return data\n-\n def _create(self) -> None:\n environment: Dict[str, Any] = {\n 'hw': {\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/connect.py",
"new_path": "tmt/steps/provision/connect.py",
"diff": "@@ -91,23 +91,27 @@ class ProvisionConnect(tmt.steps.provision.ProvisionPlugin):\n if not guest:\n raise tmt.utils.SpecificationError(\n 'Provide a host name or an ip address to connect.')\n- data = dict(guest=guest, user=user, role=self.get('role'))\n+ data = tmt.steps.provision.GuestSshData(\n+ role=self.get('role'),\n+ guest=guest,\n+ user=user\n+ )\n self.info('guest', guest, 'green')\n self.info('user', user, 'green')\n if port:\n self.info('port', port, 'green')\n- data['port'] = port\n+ data.port = port\n \n # Use provided password for authentication\n if password:\n self.info('password', password, 'green')\n self.debug('Using password authentication.')\n- data['password'] = password\n+ data.password = password\n # Default to using a private key (user can have configured one)\n else:\n self.info('key', key or 'not provided', 'green')\n self.debug('Using private key authentication.')\n- data['key'] = key\n+ data.key = key\n \n # And finally create the guest\n self._guest = tmt.GuestSsh(data, name=self.name, parent=self.step)\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/local.py",
"new_path": "tmt/steps/provision/local.py",
"diff": "@@ -37,7 +37,10 @@ class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):\n super().go()\n \n # Create a GuestLocal instance\n- data = {'guest': 'localhost', 'role': self.get('role')}\n+ data = tmt.steps.provision.GuestSshData(\n+ guest='localhost',\n+ role=self.get('role')\n+ )\n self._guest = GuestLocal(data, name=self.name, parent=self.step)\n \n def guest(self):\n@@ -52,10 +55,7 @@ class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):\n class GuestLocal(tmt.Guest):\n \"\"\" Local Host \"\"\"\n \n- def __init__(self, data, name=None, parent=None):\n- \"\"\" Initialize guest data \"\"\"\n- super().__init__(data, name, parent)\n- self.localhost = True\n+ localhost = True\n \n def ansible(self, playbook, extra_args=None):\n \"\"\" Prepare localhost using ansible playbook \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/podman.py",
"new_path": "tmt/steps/provision/podman.py",
"diff": "@@ -1,4 +1,6 @@\n+import dataclasses\n import os\n+from typing import Optional\n \n import click\n \n@@ -12,6 +14,17 @@ CONNECTION_TIMEOUT = 60\n DEFAULT_IMAGE = \"fedora\"\n DEFAULT_USER = \"root\"\n \n+# TODO: get rid of `ignore` once superclass is no longer `Any`\n+\n+\n+@dataclasses.dataclass\n+class PodmanGuestData(tmt.steps.provision.GuestData): # type: ignore[misc]\n+ image: str = DEFAULT_IMAGE\n+ user: str = DEFAULT_USER\n+ force_pull: bool = False\n+\n+ container: Optional[str] = None\n+\n \n class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n \"\"\"\n@@ -58,14 +71,10 @@ class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n \n def default(self, option, default=None):\n \"\"\" Return default data for given option \"\"\"\n- # Use 'fedora' as a default image\n- if option == 'image':\n- return DEFAULT_IMAGE\n- # Use 'root' as a default user\n- if option == 'user':\n- return DEFAULT_USER\n- # No other defaults available\n- return default\n+ if option == 'pull':\n+ return PodmanGuestData().force_pull\n+\n+ return getattr(PodmanGuestData(), option.replace('-', '_'), default)\n \n def wake(self, keys=None, data=None):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n@@ -85,9 +94,15 @@ class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n self.info('image', f\"{self.get('image')}{pull}\", 'green')\n \n # Prepare data for the guest instance\n- data = dict()\n- for key in self._keys + self._common_keys:\n- data[key] = self.get(key)\n+ data_from_options = {\n+ key: self.get(key)\n+ for key in PodmanGuestData.keys()\n+ if key != 'force_pull'\n+ }\n+\n+ data_from_options['force_pull'] = self.get('pull')\n+\n+ data = PodmanGuestData(**data_from_options)\n \n # Create a new GuestTestcloud instance and start it\n self._guest = GuestContainer(data, name=self.name, parent=self.step)\n@@ -105,23 +120,12 @@ class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n class GuestContainer(tmt.Guest):\n \"\"\" Container Instance \"\"\"\n \n- def load(self, data):\n- \"\"\" Load guest data and initialize attributes \"\"\"\n- super().load(data)\n-\n- # Load basic data\n- self.image = data.get('image')\n- self.force_pull = data.get('pull')\n- self.container = data.get('container')\n- self.user = data.get('user')\n-\n- def save(self):\n- \"\"\" Save guest data for future wake up \"\"\"\n- data = super().save()\n- data['container'] = self.container\n- data['image'] = self.image\n- data['user'] = self.user\n- return data\n+ _data_class = PodmanGuestData\n+\n+ image: Optional[str]\n+ container: Optional[str]\n+ user: str\n+ force_pull: bool\n \n def wake(self):\n \"\"\" Wake up the guest \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -1,5 +1,6 @@\n # coding: utf-8\n \n+import dataclasses\n import os\n import platform\n import re\n@@ -148,6 +149,31 @@ NON_KVM_TIMEOUT_COEF = 10 # times\n # SSH key type, set None for ssh-keygen default one\n SSH_KEYGEN_TYPE = \"ecdsa\"\n \n+DEFAULT_USER = 'root'\n+DEFAULT_MEMORY = 2048\n+DEFAULT_DISK = 10\n+DEFAULT_IMAGE = 'fedora'\n+DEFAULT_CONNECTION = 'session'\n+DEFAULT_ARCH = platform.machine()\n+\n+# TODO: get rid of `ignore` once superclass is no longer `Any`\n+\n+\n+@dataclasses.dataclass\n+class TestcloudGuestData(\n+ tmt.steps.provision.GuestSshData): # type: ignore[misc]\n+ # Override parent class with our defaults\n+ user: str = DEFAULT_USER\n+\n+ image: str = DEFAULT_IMAGE\n+ memory: int = DEFAULT_MEMORY\n+ disk: int = DEFAULT_DISK\n+ connection: str = DEFAULT_CONNECTION\n+ arch: str = DEFAULT_ARCH\n+\n+ image_url: Optional[str] = None\n+ instance_name: Optional[str] = None\n+\n \n class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n \"\"\"\n@@ -231,26 +257,17 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n \n def default(self, option, default=None):\n \"\"\" Return default data for given option \"\"\"\n- defaults = {\n- 'user': 'root',\n- 'memory': 2048,\n- 'disk': 10,\n- 'image': 'fedora',\n- 'connection': 'session',\n- 'arch': platform.machine()\n- }\n- if option in defaults:\n- return defaults[option]\n- return default\n+ return getattr(TestcloudGuestData(), option, default)\n \n def wake(self, keys=None, data=None):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n super().wake(keys=keys, data=data)\n \n # Convert memory and disk to integers\n- for key in ['memory', 'disk']:\n- if isinstance(self.get(key), str):\n- self.data[key] = int(self.data[key])\n+ # TODO: can they ever *not* be integers at this point?\n+ # for key in ['memory', 'disk']:\n+ # if isinstance(self.get(key), str):\n+ # self.data[key] = int(self.data[key])\n \n # Wake up testcloud instance\n if data:\n@@ -263,18 +280,19 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n super().go()\n \n # Give info about provided data\n- data = dict()\n- for key in self._keys + self._common_keys:\n- data[key] = self.get(key)\n+ data = TestcloudGuestData(**{\n+ key: self.get(key)\n+ for key in TestcloudGuestData.keys()\n+ })\n+ for key, value in data.to_dict().items():\n if key == 'memory':\n- self.info('memory', f\"{self.get('memory')} MB\", 'green')\n+ self.info('memory', f\"{value} MB\", 'green')\n elif key == 'disk':\n- self.info('disk', f\"{self.get('disk')} GB\", 'green')\n+ self.info('disk', f\"{value} GB\", 'green')\n elif key == 'connection':\n- self.verbose(key, data[key], 'green')\n- else:\n- if data[key] is not None:\n- self.info(key, data[key], 'green')\n+ self.verbose('connection', value, 'green')\n+ elif value is not None:\n+ self.info(key, value, 'green')\n \n # Create a new GuestTestcloud instance and start it\n self._guest = GuestTestcloud(data, name=self.name, parent=self.step)\n@@ -315,6 +333,16 @@ class GuestTestcloud(tmt.GuestSsh):\n arch ....... architecture for the VM, host arch is the default\n \"\"\"\n \n+ _data_class = TestcloudGuestData\n+\n+ image: str\n+ image_url: Optional[str]\n+ instance_name: Optional[str]\n+ memory: int\n+ disk: str\n+ connection: str\n+ arch: str\n+\n # Not to be saved, recreated from image_url/instance_name/... 
every\n # time guest is instantiated.\n _image: Optional['testcloud.image.Image'] = None\n@@ -409,27 +437,6 @@ class GuestTestcloud(tmt.GuestSsh):\n with open(DOMAIN_TEMPLATE_FILE, 'w') as template:\n template.write(DOMAIN_TEMPLATE)\n \n- def load(self, data):\n- \"\"\" Load guest data and initialize attributes \"\"\"\n- super().load(data)\n- self.image = data.get('image')\n- self.image_url = data.get('image_url')\n- self.instance_name = data.get('instance_name')\n- self.memory = data.get('memory')\n- self.disk = data.get('disk')\n- self.connection = data.get('connection')\n- self.arch = data.get('arch')\n-\n- def save(self):\n- \"\"\" Save guest data for future wake up \"\"\"\n- data = super().save()\n- data['instance_name'] = self.instance_name\n- data['image'] = self.image\n- data['image_url'] = self.image_url\n- data['connection'] = self.connection\n- data['arch'] = self.arch\n- return data\n-\n def wake(self):\n \"\"\" Wake up the guest \"\"\"\n self.debug(\n"
}
] |
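The commit above replaces hand-written `load()`/`save()` pairs with dataclass-backed guest data. A simplified sketch of the round trip follows; tmt's real `SerializableContainer` also records the originating class so it can be re-imported dynamically, which is omitted here.

```python
import dataclasses
from typing import List, Optional


@dataclasses.dataclass
class GuestSshData:
    """Keys needed to describe and restore an SSH-capable guest."""
    role: Optional[str] = None
    guest: Optional[str] = None
    port: Optional[int] = None
    user: Optional[str] = None
    key: List[str] = dataclasses.field(default_factory=list)
    password: Optional[str] = None

    def to_serialized(self) -> dict:
        # Plain dict, ready to be dumped to YAML
        return dataclasses.asdict(self)

    @classmethod
    def unserialize(cls, serialized: dict) -> 'GuestSshData':
        return cls(**serialized)


data = GuestSshData(guest='localhost', user='root', key=['/home/user/.ssh/id_ecdsa'])
restored = GuestSshData.unserialize(data.to_serialized())
assert restored == data
```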
ff19da28d197996c05d86eddcf5351376c5c27f6 | teemtee/tmt | 03.08.2022 18:07:03 | MIT License | Reboot has to check for boot time
The previous attempt, based on closing the connection, was not successful, as shown by
/tests/execute/reboot/reuse_provision
`btime` in /proc/stat is the boot time in seconds since the epoch, and reboot()
now makes sure that it has changed.
Also adds a disk requirement for tests/full, which helps with testing. | [
{
"change_type": "MODIFY",
"old_path": "tests/full/plan.fmf",
"new_path": "tests/full/plan.fmf",
"diff": "@@ -3,5 +3,6 @@ discover:\n provision:\n how: virtual\n memory: 4000\n+ disk: 40\n execute:\n how: tmt\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/__init__.py",
"new_path": "tmt/steps/provision/__init__.py",
"diff": "@@ -10,7 +10,6 @@ import subprocess\n import tempfile\n import time\n from shlex import quote\n-from threading import Thread\n from typing import Dict, List, Optional, Type\n \n import click\n@@ -510,7 +509,7 @@ class Guest(tmt.utils.Common):\n \n def reconnect(self, timeout=None):\n \"\"\"\n- Ensure the connection to the guest is working after reboot\n+ Ensure the connection to the guest is working\n \n The default timeout is 5 minutes. Custom number of seconds can be\n provided in the `timeout` parameter. This may be useful when long\n@@ -534,7 +533,7 @@ class Guest(tmt.utils.Common):\n self.debug('Failed to connect to the guest, retrying.')\n time.sleep(1)\n else:\n- self.debug(\"Connection to guest failed after reboot.\")\n+ self.debug(\"Connection to guest failed.\")\n return False\n return True\n \n@@ -890,25 +889,20 @@ class GuestSsh(Guest):\n raise tmt.utils.ProvisionError(\n \"Method does not support hard reboot.\")\n \n- # Reboot takes its time and timeout is for whole reboot + reconnect\n- # so we need to shorten reconnect() appropriately\n- # but whole outcome is ignored https://github.com/teemtee/tmt/issues/1405\n- # and there is plan to make common handler for timeouts\n- # https://github.com/teemtee/tmt/pull/1280\n- # So do not pretend it is doing what is written and ignore it as the\n- # rest of tmt does (FIXME) and wait until better API\n-\n timeout = timeout or tmt.steps.provision.CONNECTION_TIMEOUT\n \n- def sleep_past_reboot():\n- try:\n- # Really long sleep which will be killed by connection drop\n- self.execute(f'sleep {timeout * 2}')\n- except tmt.utils.RunError:\n- pass\n+ now = datetime.datetime.utcnow\n+ deadline = now() + datetime.timedelta(seconds=timeout)\n+\n+ re_boot_time = re.compile(r'btime\\s+(\\d+)')\n \n- connection_probe = Thread(target=sleep_past_reboot)\n- connection_probe.start()\n+ def get_boot_time():\n+ \"\"\" Reads btime from /proc/stat \"\"\"\n+ stdout = self.execute([\"cat\", \"/proc/stat\"]).stdout\n+ assert stdout\n+ return int(re_boot_time.search(stdout).group(1))\n+\n+ current_boot_time = get_boot_time()\n \n try:\n command = command or \"reboot\"\n@@ -922,13 +916,21 @@ class GuestSsh(Guest):\n \"Seems the connection was closed too fast, ignoring.\")\n else:\n raise\n- # Wait until ssh connection drops (sleep_past_reboot is terminated)\n- self.debug(f\"Waiting up to {timeout}s for the connection to be dropped.\")\n- connection_probe.join(timeout=timeout)\n-\n- # FIXME reconnect should not be called if timeout is exceeded\n- # FIXME shorten reconnect timeout\n- return self.reconnect(timeout=timeout)\n+ # Wait until we get new boot time, connection will drop and will be\n+ # unreachable for some time\n+ while now() < deadline:\n+ try:\n+ new_boot_time = get_boot_time()\n+ if new_boot_time != current_boot_time:\n+ # Different boot time and we are reconnected\n+ return True\n+ self.debug(\"Same boot time, reboot didn't happen yet, retrying\")\n+ except tmt.utils.RunError:\n+ self.debug('Failed to connect to the guest, retrying.')\n+ # Either couldn't connect or boot time didn't change\n+ time.sleep(1)\n+ self.debug(\"Connection to guest failed - timeout exceeded.\")\n+ return False\n \n def remove(self):\n \"\"\"\n"
}
] |
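The reboot check above relies on the `btime` field of `/proc/stat`. A minimal sketch of that check follows, reading the local file instead of running `cat /proc/stat` over SSH as the plugin does (Linux only):

```python
import re
from pathlib import Path

# 'btime' is the boot time in seconds since the epoch
RE_BOOT_TIME = re.compile(r'btime\s+(\d+)')


def get_boot_time(stat_content: str) -> int:
    """Extract the boot time from /proc/stat content."""
    match = RE_BOOT_TIME.search(stat_content)
    if match is None:
        raise ValueError("No 'btime' field found in /proc/stat content")
    return int(match.group(1))


if __name__ == '__main__':
    # A changed value after issuing 'reboot' proves the reboot really happened
    print(get_boot_time(Path('/proc/stat').read_text()))
```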
be1084624b4fb2c501a66dfcfd3cb9bde8108747 | teemtee/tmt | 14.08.2022 12:51:09 | MIT License | Remove unused keys parameter from wake() methods
This seems to be no longer used, and makes `wake()` more complicated
than necessary. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/__init__.py",
"new_path": "tmt/steps/__init__.py",
"diff": "@@ -613,7 +613,7 @@ class BasePlugin(Phase, metaclass=PluginIndex):\n return True\n return where in (guest.name, guest.role)\n \n- def wake(self, keys: Optional[List[str]] = None) -> None:\n+ def wake(self) -> None:\n \"\"\"\n Wake up the plugin, process data, apply options\n \n@@ -626,8 +626,8 @@ class BasePlugin(Phase, metaclass=PluginIndex):\n in the 'keys' parameter can be used to override only\n selected ones.\n \"\"\"\n- if keys is None:\n- keys = self._common_keys + self._keys\n+ keys = self._common_keys + self._keys\n+\n for key in keys:\n value = self.opt(key)\n if value:\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/discover/fmf.py",
"new_path": "tmt/steps/discover/fmf.py",
"diff": "@@ -172,7 +172,7 @@ class DiscoverFmf(tmt.steps.discover.DiscoverPlugin):\n ),\n ] + super().options(how)\n \n- def wake(self, keys=None):\n+ def wake(self):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n # Handle backward-compatible stuff\n if 'repository' in self.data:\n@@ -184,7 +184,7 @@ class DiscoverFmf(tmt.steps.discover.DiscoverPlugin):\n tmt.utils.listify(self.data, keys=[\"exclude\", \"filter\", \"test\"])\n \n # Process command line options, apply defaults\n- super().wake(keys=keys)\n+ super().wake()\n \n @property\n def is_in_standalone_mode(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/discover/shell.py",
"new_path": "tmt/steps/discover/shell.py",
"diff": "@@ -53,9 +53,9 @@ class DiscoverShell(tmt.steps.discover.DiscoverPlugin):\n test_names = [test['name'] for test in tests]\n click.echo(tmt.utils.format('tests', test_names))\n \n- def wake(self, keys: Optional[List[str]] = None) -> None:\n+ def wake(self) -> None:\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n # Check provided tests, default to an empty list\n if 'tests' not in self.data:\n self.data['tests'] = []\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/execute/internal.py",
"new_path": "tmt/steps/execute/internal.py",
"diff": "@@ -97,9 +97,9 @@ class ExecuteInternal(tmt.steps.execute.ExecutePlugin):\n help='Disable interactive progress bar showing the current test.'))\n return options + super().options(how)\n \n- def wake(self, keys=None):\n+ def wake(self):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n # Make sure that script is a list\n tmt.utils.listify(self.data, keys=['script'])\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/finish/shell.py",
"new_path": "tmt/steps/finish/shell.py",
"diff": "@@ -47,9 +47,9 @@ class FinishShell(tmt.steps.finish.FinishPlugin):\n return default\n \n # TODO: use better types once superclass gains its annotations\n- def wake(self, keys: Optional[List[str]] = None) -> None:\n+ def wake(self) -> None:\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n \n # Convert to list if single script provided\n tmt.utils.listify(self.data, keys=['script'])\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/prepare/ansible.py",
"new_path": "tmt/steps/prepare/ansible.py",
"diff": "@@ -81,9 +81,9 @@ class PrepareAnsible(tmt.steps.prepare.PreparePlugin): # type: ignore[misc]\n return default\n \n # TODO: use better types once superclass gains its annotations\n- def wake(self, keys: Optional[List[str]] = None) -> None:\n+ def wake(self) -> None:\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n \n # Convert to list if necessary\n tmt.utils.listify(self.data, keys=['playbook'])\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/prepare/install.py",
"new_path": "tmt/steps/prepare/install.py",
"diff": "@@ -381,9 +381,9 @@ class PrepareInstall(tmt.steps.prepare.PreparePlugin):\n return []\n return default\n \n- def wake(self, keys=None):\n+ def wake(self):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n # Convert to list if necessary\n tmt.utils.listify(\n self.data, split=True,\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/prepare/shell.py",
"new_path": "tmt/steps/prepare/shell.py",
"diff": "@@ -1,4 +1,4 @@\n-from typing import Any, List, Optional\n+from typing import Any, Optional\n \n import click\n import fmf\n@@ -48,9 +48,9 @@ class PrepareShell(tmt.steps.prepare.PreparePlugin): # type: ignore[misc]\n return []\n return default\n \n- def wake(self, keys: Optional[List[str]] = None) -> None:\n+ def wake(self) -> None:\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n \n # Convert to list if single script provided\n tmt.utils.listify(self.data, keys=['script'])\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/__init__.py",
"new_path": "tmt/steps/provision/__init__.py",
"diff": "@@ -237,14 +237,14 @@ class ProvisionPlugin(tmt.steps.GuestlessPlugin):\n \n return provision\n \n- def wake(self, keys: Optional[List[str]] = None, data: Optional['GuestData'] = None) -> None:\n+ def wake(self, data: Optional['GuestData'] = None) -> None:\n \"\"\"\n Wake up the plugin\n \n Override data with command line options.\n Wake up the guest based on provided guest data.\n \"\"\"\n- super().wake(keys=keys)\n+ super().wake()\n \n def guest(self) -> Optional['Guest']:\n \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/artemis.py",
"new_path": "tmt/steps/provision/artemis.py",
"diff": "@@ -366,11 +366,10 @@ class ProvisionArtemis(tmt.steps.provision.ProvisionPlugin):\n # More specific type is a violation of Liskov substitution principle, and mypy\n # complains about it - rightfully so. Ignoring the issue which should be resolved\n # with https://github.com/teemtee/tmt/pull/1439.\n- def wake(self, keys: Optional[List[str]] = None, # type: ignore[override]\n- data: Optional[ArtemisGuestData] = None) -> None:\n+ def wake(self, data: Optional[ArtemisGuestData] = None) -> None: # type: ignore[override]\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n \n- super().wake(keys=keys, data=data)\n+ super().wake(data=data)\n \n if data:\n self._guest = GuestArtemis(data, name=self.name, parent=self.step)\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/connect.py",
"new_path": "tmt/steps/provision/connect.py",
"diff": "@@ -75,13 +75,9 @@ class ProvisionConnect(tmt.steps.provision.ProvisionPlugin):\n # No other defaults available\n return default\n \n- # More specific type is a violation of Liskov substitution principle, and mypy\n- # complains about it - rightfully so. Ignoring the issue which should be resolved\n- # with https://github.com/teemtee/tmt/pull/1439.\n- def wake(self, keys: Optional[List[str]] = None, # type: ignore[override]\n- data: Optional[GuestSshData] = None) -> None:\n+ def wake(self, data: Optional[GuestSshData] = None) -> None: # type: ignore[override]\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys, data=data)\n+ super().wake(data=data)\n if data:\n self._guest = tmt.GuestSsh(data, name=self.name, parent=self.step)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/local.py",
"new_path": "tmt/steps/provision/local.py",
"diff": "@@ -25,9 +25,9 @@ class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):\n # Guest instance\n _guest = None\n \n- def wake(self, keys=None, data=None):\n+ def wake(self, data=None):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys, data=data)\n+ super().wake(data=data)\n if data:\n self._guest = GuestLocal(data, name=self.name, parent=self.step)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/podman.py",
"new_path": "tmt/steps/provision/podman.py",
"diff": "@@ -76,9 +76,9 @@ class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n \n return getattr(PodmanGuestData(), option.replace('-', '_'), default)\n \n- def wake(self, keys=None, data=None):\n+ def wake(self, data=None):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys, data=data)\n+ super().wake(data=data)\n # Wake up podman instance\n if data:\n guest = GuestContainer(data, name=self.name, parent=self.step)\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -257,9 +257,9 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n \"\"\" Return default data for given option \"\"\"\n return getattr(TestcloudGuestData(), option, default)\n \n- def wake(self, keys=None, data=None):\n+ def wake(self, data=None):\n \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(keys=keys, data=data)\n+ super().wake(data=data)\n \n # Wake up testcloud instance\n if data:\n"
}
] |
20ed22e894d89810daaf9a3d979763f9a5d94d74 | teemtee/tmt | 26.08.2022 14:20:48 | MIT License | Unify Polarion case searching
Save and load ids to/from results.yaml.
Add a constant for the extra identification keys.
Raise ReportError when a test case is not found in Polarion. | [
{
"change_type": "MODIFY",
"old_path": "tmt/base.py",
"new_path": "tmt/base.py",
"diff": "@@ -70,6 +70,9 @@ SECTIONS_HEADINGS = {\n 'Cleanup': ['<h1>Cleanup</h1>']\n }\n \n+# Extra keys used for identification in Result class\n+EXTRA_RESULT_IDENTIFICATION_KEYS = ['extra-nitrate', 'extra-task']\n+\n \n #\n # fmf id types\n@@ -2593,13 +2596,17 @@ class Result:\n self.note = data.get('note')\n self.duration = data.get('duration')\n if test:\n- self.id = test.node.get(\n- 'id', test.node.get(\n- 'extra-nitrate', test.node.get(\n- 'extra-task', '')))\n+ # Saving identifiable information for each test case so we can match them\n+ # to Polarion/Nitrate/other cases and report run results there\n+ self.ids = {tmt.identifier.ID_KEY: test.id}\n+ for key in EXTRA_RESULT_IDENTIFICATION_KEYS:\n+ self.ids[key] = test.node.get(key)\n interpret = test.result or 'respect'\n else:\n- self.id = ''\n+ try:\n+ self.ids = data['ids']\n+ except KeyError:\n+ self.ids = {}\n interpret = 'respect'\n \n # Check for valid results\n@@ -2684,6 +2691,8 @@ class Result:\n data['note'] = self.note\n if self.duration:\n data['duration'] = self.duration\n+ if self.ids:\n+ data['ids'] = self.ids\n return data\n \n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/export.py",
"new_path": "tmt/export.py",
"diff": "@@ -304,24 +304,20 @@ def get_polarion_ids(\n return query_result[0].work_item_id, query_result[0].project_id\n \n \n-def get_polarion_case(data: Dict[str, str], preferred_project: Optional[str] = None) -> Any:\n- \"\"\" Get Polarion case through couple different methods \"\"\"\n- import_polarion()\n- polarion_id = 'None'\n- project_id = None\n-\n+def find_polarion_case_ids(\n+ data: Dict[str, str],\n+ preferred_project: Optional[str] = None) -> Tuple[str, Optional[str]]:\n+ \"\"\" Find IDs for Polarion case from data dictionary \"\"\"\n assert PolarionWorkItem\n- assert PolarionTestCase\n- assert PolarionException\n \n # Search by UUID\n if data.get(ID_KEY):\n query_result = PolarionWorkItem.query(\n data.get(ID_KEY), fields=['work_item_id', 'project_id'])\n- polarion_id, project_id = get_polarion_ids(query_result, preferred_project)\n+ return get_polarion_ids(query_result, preferred_project)\n # Search by TCMS Case ID\n extra_nitrate = data.get('extra-nitrate')\n- if not project_id and extra_nitrate:\n+ if extra_nitrate:\n nitrate_case_id_search = re.search(r'\\d+', extra_nitrate)\n if not nitrate_case_id_search:\n raise ConvertError(\n@@ -329,12 +325,23 @@ def get_polarion_case(data: Dict[str, str], preferred_project: Optional[str] = N\n nitrate_case_id = str(int(nitrate_case_id_search.group()))\n query_result = PolarionWorkItem.query(\n f\"tcmscaseid:{nitrate_case_id}\", fields=['work_item_id', 'project_id'])\n- polarion_id, project_id = get_polarion_ids(query_result, preferred_project)\n+ return get_polarion_ids(query_result, preferred_project)\n # Search by extra task\n- if not project_id and data.get('extra-task'):\n+ if data.get('extra-task'):\n query_result = PolarionWorkItem.query(\n data.get('extra-task'), fields=['work_item_id', 'project_id'])\n- polarion_id, project_id = get_polarion_ids(query_result, preferred_project)\n+ return get_polarion_ids(query_result, preferred_project)\n+ return 'None', None\n+\n+\n+def get_polarion_case(data: Dict[str, str], preferred_project: Optional[str] = None) -> Any:\n+ \"\"\" Get Polarion case through couple different methods \"\"\"\n+ import_polarion()\n+\n+ assert PolarionTestCase\n+ assert PolarionException\n+\n+ polarion_id, project_id = find_polarion_case_ids(data, preferred_project)\n \n try:\n polarion_case = PolarionTestCase(\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/report/polarion.py",
"new_path": "tmt/steps/report/polarion.py",
"diff": "@@ -42,7 +42,7 @@ class ReportPolarion(tmt.steps.report.ReportPlugin):\n \"\"\" Go through executed tests and report into Polarion \"\"\"\n super().go()\n \n- from tmt.export import get_polarion_ids, import_polarion\n+ from tmt.export import find_polarion_case_ids, import_polarion\n import_polarion()\n from tmt.export import PolarionWorkItem\n assert PolarionWorkItem\n@@ -73,16 +73,16 @@ class ReportPolarion(tmt.steps.report.ReportPlugin):\n '*property[@name=\"polarion-project-span-ids\"]')\n \n for result in self.step.plan.execute.results():\n- if not result.id:\n+ if not result.ids or not any(result.ids.values()):\n raise tmt.utils.ReportError(\n f\"Test Case {result.name} is not exported to Polarion, \"\n \"please run 'tmt tests export --how polarion' on it\")\n- work_item_id, test_project_id = get_polarion_ids(\n- PolarionWorkItem.query(\n- result.id, fields=['work_item_id', 'project_id']))\n+ work_item_id, test_project_id = find_polarion_case_ids(result.ids)\n+\n+ if test_project_id is None:\n+ raise tmt.utils.ReportError(\"Test case missing or not found in Polarion\")\n \n assert work_item_id is not None\n- assert test_project_id is not None\n assert project_span_ids is not None\n \n if test_project_id not in project_span_ids.attrib['value']:\n"
}
] |
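The commit above unifies the Polarion lookup around a dictionary of identification keys stored with each result. The sketch below only illustrates the ordered-fallback idea; the `search` callable is a hypothetical stand-in for the real `PolarionWorkItem` query and is not part of tmt.

```python
from typing import Callable, Dict, Optional, Tuple


class ReportError(Exception):
    """Raised when no matching test case can be found."""


def find_case_ids(
        ids: Dict[str, Optional[str]],
        search: Callable[[str], Optional[Tuple[str, str]]]) -> Tuple[str, str]:
    """Try each stored identifier in order until one resolves to (work_item_id, project_id)."""
    for key in ('id', 'extra-nitrate', 'extra-task'):
        value = ids.get(key)
        if not value:
            continue
        found = search(value)
        if found is not None:
            return found
    raise ReportError("Test case missing or not found in Polarion")
```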
c724839dae3bbde4cd102e5e174d1f4886a7abab | teemtee/tmt | 17.08.2022 13:18:40 | MIT License | Replace blank "type: ignore" with more specific waivers
Using mypy's error codes, waivers can target the actual error, so that
other kinds of violations are not suppressed. | [
{
"change_type": "MODIFY",
"old_path": ".pre-commit-config.yaml",
"new_path": ".pre-commit-config.yaml",
"diff": "@@ -60,3 +60,22 @@ repos:\n hooks:\n - id: yamllint\n files: ^tmt/schemas/.*\\.yaml\n+\n+ # Yet another static analysis - these hooks use regular expressions to\n+ # process Python code, and offer interesting \"metalinters\", checks for\n+ # what we do to appease flake8 and mypy linters.\n+ - repo: https://github.com/pre-commit/pygrep-hooks\n+ rev: v1.9.0\n+ hooks:\n+ # Enforce `noqa` and `type: ignore` to always appear with specific\n+ # error code(s).\n+ - id: python-check-blanket-noqa\n+ - id: python-check-blanket-type-ignore\n+\n+ # Other potentially useful hooks for future consideration:\n+ #\n+ # - id: python-check-mock-methods\n+ # - id: python-no-eval\n+ # - id: python-no-log-warn\n+ # - id: python-use-type-annotations\n+ # - id: text-unicode-replacement-char\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/cli.py",
"new_path": "tmt/cli.py",
"diff": "@@ -394,7 +394,7 @@ if run_callback is None:\n \n \n # TODO: commands is unknown, needs revisit\n-@run_callback() # type: ignore\n+@run_callback() # type: ignore[misc]\n @click.pass_context\n def finito(\n click_context: click.core.Context,\n@@ -1310,7 +1310,7 @@ if clean_callback is None:\n \n \n # TODO: commands is unknown, needs revisit\n-@clean_callback() # type: ignore\n+@clean_callback() # type: ignore[misc]\n @click.pass_context\n def perform_clean(\n click_context: click.core.Context,\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/export.py",
"new_path": "tmt/export.py",
"diff": "@@ -66,7 +66,7 @@ def import_nitrate() -> Any:\n except ImportError:\n raise ConvertError(\n \"Install tmt-test-convert to export tests to nitrate.\")\n- except nitrate.NitrateError as error: # type: ignore\n+ except nitrate.NitrateError as error: # type: ignore[union-attr] # nitrate is no longer None\n raise ConvertError(error)\n \n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/finish/ansible.py",
"new_path": "tmt/steps/finish/ansible.py",
"diff": "@@ -34,4 +34,4 @@ class FinishAnsible(tmt.steps.finish.FinishPlugin, PrepareAnsible):\n \n # Assigning class methods seems to cause trouble to mypy\n # See also: https://github.com/python/mypy/issues/6700\n- base_command = tmt.steps.finish.FinishPlugin.base_command # type: ignore\n+ base_command = tmt.steps.finish.FinishPlugin.base_command # type: ignore[assignment]\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -350,7 +350,7 @@ class GuestTestcloud(tmt.GuestSsh):\n \"\"\"\n \n # TODO: Revisit this `type: ignore` once `Guest` becomes a generic type\n- _data_class = TestcloudGuestData # type: ignore\n+ _data_class = TestcloudGuestData # type: ignore[assignment]\n \n image: str\n image_url: Optional[str]\n@@ -362,8 +362,8 @@ class GuestTestcloud(tmt.GuestSsh):\n \n # Not to be saved, recreated from image_url/instance_name/... every\n # time guest is instantiated.\n- _image: Optional['testcloud.image.Image'] = None # type: ignore\n- _instance: Optional['testcloud.instance.Instance'] = None # type: ignore\n+ _image: Optional['testcloud.image.Image'] = None # type: ignore[name-defined]\n+ _instance: Optional['testcloud.instance.Instance'] = None # type: ignore[name-defined]\n \n def _get_url(self, url: str, message: str) -> requests.Response:\n \"\"\" Get url, retry when fails, return response \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/report/junit.py",
"new_path": "tmt/steps/report/junit.py",
"diff": "@@ -124,10 +124,10 @@ class ReportJUnit(tmt.steps.report.ReportPlugin):\n try:\n with open(f_path, 'w') as fw:\n if hasattr(junit_xml, 'to_xml_report_file'):\n- junit_xml.to_xml_report_file(fw, [suite]) # type: ignore\n+ junit_xml.to_xml_report_file(fw, [suite]) # type: ignore[union-attr]\n else:\n # For older junit-xml\n- junit_xml.TestSuite.to_file(fw, [suite]) # type: ignore\n+ junit_xml.TestSuite.to_file(fw, [suite]) # type: ignore[union-attr]\n self.info(\"output\", f_path, 'yellow')\n except Exception as error:\n raise tmt.utils.ReportError(\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/utils.py",
"new_path": "tmt/utils.py",
"diff": "@@ -1798,7 +1798,7 @@ class TimeoutHTTPAdapter(requests.adapters.HTTPAdapter):\n \n super().__init__(*args, **kwargs)\n \n- def send( # type: ignore # does not match superclass type on purpose\n+ def send( # type: ignore[override] # does not match superclass type on purpose\n self,\n request: requests.PreparedRequest,\n **kwargs: Any) -> requests.Response:\n@@ -1839,7 +1839,7 @@ class RetryStrategy(requests.packages.urllib3.util.retry.Retry): # type: ignore\n return super().increment(*args, **kwargs)\n \n \n-class retry_session(contextlib.AbstractContextManager): # type: ignore\n+class retry_session(contextlib.AbstractContextManager): # type: ignore[type-arg]\n \"\"\"\n Context manager for requests.Session() with retries and timeout\n \"\"\"\n@@ -2627,7 +2627,7 @@ def git_clone(\n return git_clone(url, destination, common, env, shallow=False)\n \n \n-class updatable_message(contextlib.AbstractContextManager): # type: ignore\n+class updatable_message(contextlib.AbstractContextManager): # type: ignore[type-arg]\n \"\"\" Updatable message suitable for progress-bar-like reporting \"\"\"\n \n def __init__(\n"
}
] |
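The hunks above all narrow blanket # type: ignore comments down to specific mypy error codes. A minimal made-up snippet (not from tmt) showing why the narrowed form is preferable: the bracketed code silences only the named error, while every other check on that line stays active.

    from typing import Optional

    def maybe_int(raw: str) -> Optional[int]:
        return int(raw) if raw.isdigit() else None

    # Without "[assignment]" the ignore would hide *every* mypy error on the
    # line; with it, only the Optional[int] -> int mismatch is silenced.
    count: int = maybe_int("42")  # type: ignore[assignment]
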
8c7efb43f4bb5ebeabf41bce7b87f31a95074a1d | teemtee/tmt | 14.08.2022 16:39:48 | MIT License | Add a helper for importing a member from a module
This primitive has appeared twice already, and it follows the same
scenario: import a module, then locate a member (usually a class)
in the module, and catch possible errors along the way. | [
{
"change_type": "MODIFY",
"old_path": "tests/unit/test_utils.py",
"new_path": "tests/unit/test_utils.py",
"diff": "@@ -12,6 +12,8 @@ import unittest.mock\n import pytest\n \n import tmt\n+import tmt.plugins\n+import tmt.steps.discover\n from tmt.utils import (Common, GeneralError, StructuredField,\n StructuredFieldError, WaitingIncomplete,\n WaitingTimedOutError, duration_to_seconds, listify,\n@@ -804,3 +806,23 @@ def test_wait_success_but_too_late():\n \n with pytest.raises(WaitingTimedOutError):\n wait(Common(), check, datetime.timedelta(seconds=1))\n+\n+\n+def test_import_member():\n+ klass = tmt.plugins.import_member('tmt.steps.discover', 'Discover')\n+\n+ assert klass is tmt.steps.discover.Discover\n+\n+\n+def test_import_member_no_such_module():\n+ with pytest.raises(\n+ tmt.utils.GeneralError,\n+ match=r\"Failed to import module 'tmt\\.steps\\.nope_does_not_exist'.\"):\n+ tmt.plugins.import_member('tmt.steps.nope_does_not_exist', 'Discover')\n+\n+\n+def test_import_member_no_such_class():\n+ with pytest.raises(\n+ tmt.utils.GeneralError,\n+ match=r\"No such member 'NopeDoesNotExist' in module 'tmt\\.steps\\.discover'.\"):\n+ tmt.plugins.import_member('tmt.steps.discover', 'NopeDoesNotExist')\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/plugins/__init__.py",
"new_path": "tmt/plugins/__init__.py",
"diff": "@@ -6,7 +6,7 @@ import importlib\n import os\n import pkgutil\n import sys\n-from typing import Generator, Optional\n+from typing import Any, Generator, Optional\n \n if sys.version_info < (3, 9):\n from importlib_metadata import entry_points\n@@ -73,6 +73,27 @@ def import_(module: str, path: Optional[str] = None) -> None:\n (f\" from '{path}'.\" if path else \".\") + f\"\\n({error})\")\n \n \n+def import_member(module_name: str, member_name: str) -> Any:\n+ \"\"\" Import member from given module, handle errors nicely \"\"\"\n+ # Make sure the module is imported. It probably is, but really,\n+ # make sure of it.\n+ try:\n+ import_(module_name)\n+ except SystemExit as exc:\n+ raise tmt.utils.GeneralError(f\"Failed to import module '{module_name}'.\", original=exc)\n+\n+ # Now the module should be available in `sys.modules` like any\n+ # other, and we can go and grab the class we need from it.\n+ if module_name not in sys.modules:\n+ raise tmt.utils.GeneralError(f\"Failed to import module '{module_name}'.\")\n+ module = sys.modules[module_name]\n+\n+ # Get the member and return it\n+ if not hasattr(module, member_name):\n+ raise tmt.utils.GeneralError(f\"No such member '{member_name}' in module '{module_name}'.\")\n+ return getattr(module, member_name)\n+\n+\n def discover(path: str) -> Generator[str, None, None]:\n \"\"\" Discover available plugins for given paths \"\"\"\n for _, name, package in pkgutil.iter_modules([path]):\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/utils.py",
"new_path": "tmt/utils.py",
"diff": "@@ -1483,7 +1483,7 @@ class SerializableContainer:\n and dynamic imports of modules as needed.\n \"\"\"\n \n- from tmt.plugins import import_\n+ from tmt.plugins import import_member\n \n # Unpack class info, to get nicer variable names\n if \"__class__\" not in serialized:\n@@ -1492,22 +1492,7 @@ class SerializableContainer:\n \"Use 'tmt clean runs' to clean up old runs.\")\n \n klass_info = serialized.pop('__class__')\n- klass_module_name = klass_info['module']\n- klass_name = klass_info['name']\n-\n- # Make sure the module is imported. It probably is, but really,\n- # make sure of it.\n- import_(klass_module_name)\n-\n- # Now the module should be available in `sys.modules` like any\n- # other, and we can go and grab the class we need from it.\n- klass_module = sys.modules[klass_module_name]\n- klass = getattr(klass_module, klass_name)\n-\n- if klass is None:\n- raise SystemExit(\n- f\"Failed to import '{klass_name}' \"\n- f\"from '{klass_module_name}' module.\")\n+ klass = import_member(klass_info['module'], klass_info['name'])\n \n # Stay away from classes that are not derived from this one, to\n # honor promise given by return value annotation.\n@@ -2920,26 +2905,20 @@ def _prenormalize_fmf_node(node: fmf.Tree, schema_name: str) -> fmf.Tree:\n # Instead of having a set of if-elif tests, we can reach the default `how`\n # dynamically.\n \n- from tmt.plugins import import_\n+ from tmt.plugins import import_member\n \n step_module_name = f'tmt.steps.{step_name}'\n step_class_name = step_name.capitalize()\n \n- # Make sure the step module is imported. It probably is, but really,\n- # make sure of it.\n- import_(step_module_name)\n-\n- # Now the module should be available in `sys.modules` like any\n- # other, and we can go and grab the class we need from it.\n- step_module = sys.modules[step_module_name]\n- step_class = getattr(step_module, step_class_name)\n+ step_class = import_member(step_module_name, step_class_name)\n \n- if step_class is None:\n+ if not issubclass(step_class, tmt.steps.Step):\n raise GeneralError(\n- f'Step {step_name} implementation cannot be found '\n- f'in {step_module_name}.{step_class_name}')\n+ 'Possible step {step_name} implementation '\n+ f'{step_module_name}.{step_class_name} is not a subclass '\n+ 'of tmt.steps.Step class.')\n \n- step['how'] = cast(tmt.steps.Step, step_class).DEFAULT_HOW\n+ step['how'] = step_class.DEFAULT_HOW\n \n def _process_step_collection(step_name: str, step_collection: Any) -> None:\n \"\"\"\n"
}
] |
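A minimal standalone sketch of the scenario this helper captures, using only the standard library; RuntimeError stands in for tmt.utils.GeneralError and the error messages are simplified.

    import importlib
    import sys
    from typing import Any

    def import_member(module_name: str, member_name: str) -> Any:
        """Import a module and fetch a member from it, with uniform errors."""
        try:
            importlib.import_module(module_name)
        except ImportError as exc:
            raise RuntimeError(f"Failed to import module '{module_name}'.") from exc
        module = sys.modules[module_name]
        if not hasattr(module, member_name):
            raise RuntimeError(
                f"No such member '{member_name}' in module '{module_name}'.")
        return getattr(module, member_name)

    # Mirrors the happy path of the new unit test, just with a stdlib module.
    Path = import_member("pathlib", "Path")
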
c2eee2af912e34debfcfa8c575835c2f9d1812d2 | teemtee/tmt | 20.09.2022 14:31:28 | MIT License | Document & correct use of class conversion methods
Recently added `{to,from}_{serialized,raw,dict}` methods for various
conversions were poorly documented, and sometimes even wrongly used.
This patch adds a short summary on when to use them, and fixes some
subpar bits around the code. | [
{
"change_type": "MODIFY",
"old_path": "docs/classes.rst",
"new_path": "docs/classes.rst",
"diff": "@@ -72,6 +72,113 @@ In a similar way, the ``tree`` property of the ``Tree`` instance\n points to the original ``fmf.Tree`` from which it was initialized.\n \n \n+Class Conversions\n+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n+\n+Various internal objects and classes often need to be converted\n+from their Python nature to data that can be saved, loaded or\n+exported in different form. To facilitate these conversions, three\n+families of helper methods are provided, each with its own set of\n+use cases.\n+\n+``to_spec``/``from_spec``\n+------------------------------------------------------------------\n+\n+This family of methods works with tmt *specification*, i.e. raw\n+user-provided data coming from fmf files describing plans, tests,\n+stories, or from command-line options. ``from_spec()`` shall be\n+called to spawn objects representing the user input, while\n+``to_spec()`` should produce output one could find in fmf files.\n+\n+The default implementation comes from ``tmt.utils.SpecBasedContainer``\n+class, all classes based on user input data should include this\n+class among their bases.\n+\n+.. code-block:: python\n+\n+ # Create an fmf id object from raw data\n+ fmf_id = tmt.base.FmfId.from_spec({'url': ..., 'ref': ...})\n+\n+\n+``to_serialized``/``from_serialized``/``unserialize``\n+------------------------------------------------------------------\n+\n+This family of methods is aiming at runtime objects that may be\n+saved into and loaded from tmt working files, i.e. files tmt uses\n+to store a state in its workdir, like `step.yaml` or `guests.yaml`.\n+\n+Third member of this family, ``unserialize``, is similar to\n+``from_serialized`` - both create an object from its serialized form,\n+only ``unserialize`` is capable of detecting the class to instantiate\n+while for using ``from_serialized``, one must already know which\n+class to work with. ``unserialize`` then uses ``from_serialized``\n+under the hood to do the heavy lifting when correct class is\n+identified.\n+\n+The default implementation comes from ``tmt.utils.SerializableContainer``\n+class, all classes that are being saved and loaded during tmt run\n+should include this class among their bases.\n+\n+See https://en.wikipedia.org/wiki/Serialization for more details\n+on the concept of serialization.\n+\n+.. code-block:: python\n+\n+ # tmt.steps.discover.shell.DiscoverShellData wishes to unserialize its\n+ # `tests` a list of `TestDescription` objects rather than a list of\n+ # dictionaries (the default implementation).\n+ @classmethod\n+ def from_serialized(cls, serialized: Dict[str, Any]) -> 'DiscoverShellData':\n+ obj = super().from_serialized(serialized)\n+\n+ obj.tests = [TestDescription.from_serialized(\n+ serialized_test) for serialized_test in serialized['tests']]\n+\n+ return obj\n+\n+ # A step saving its state...\n+ content: Dict[str, Any] = {\n+ 'status': self.status(),\n+ 'data': [datum.to_serialized() for datum in self.data]\n+ }\n+ self.write('step.yaml', tmt.utils.dict_to_yaml(content))\n+\n+ # ... 
and loading it back.\n+ # Note the use of unserialize(): step data may have been serialized from\n+ # various different classes (derived from tmt.steps.provision.Guest),\n+ # and unserialize() will detect the correct class.\n+ raw_step_data: Dict[Any, Any] = tmt.utils.yaml_to_dict(self.read('step.yaml'))\n+ self.data = [\n+ StepData.unserialize(raw_datum) for raw_datum in raw_step_data['data']\n+ ]\n+\n+\n+``to_dict``\n+------------------------------------------------------------------\n+\n+Very special helper method: its use cases are not related to any\n+input or output data, and most of the time, when in need of\n+iterating over object's keys and/or values, one can use ``keys()``,\n+``values()`` or ``items()`` methods. It is used as a source of data\n+for serialization and validation, but it usually has no use outside\n+of default implementations.\n+\n+.. warning::\n+\n+ If you think of using ``to_dict()``, please, think again and be sure\n+ you know what are you doing. Despite its output being sometimes\n+ perfectly compatible with output of ``to_serialized()`` or ``to_spec()``,\n+ it is not generaly true, and using it instead of proper methods may lead\n+ to unexpected exceptions.\n+\n+.. code-block:: python\n+\n+ # tmt.base.FmfId's specification is basically just a mapping,\n+ # therefore `to_dict()` is good enough to produce a specification.\n+ def to_spec(self) -> Dict[str, Any]:\n+ return self.to_dict()\n+\n+\n Essential Classes\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/base.py",
"new_path": "tmt/base.py",
"diff": "@@ -94,7 +94,7 @@ _RawFmfId = TypedDict('_RawFmfId', {\n \n # An internal fmf id representation.\n @dataclasses.dataclass\n-class FmfId(tmt.utils.SerializableContainer):\n+class FmfId(tmt.utils.SpecBasedContainer, tmt.utils.SerializableContainer):\n # The list of valid fmf id keys\n keys: ClassVar[List[str]] = ['url', 'ref', 'path', 'name']\n \n@@ -103,19 +103,14 @@ class FmfId(tmt.utils.SerializableContainer):\n path: Optional[str] = None\n name: Optional[str] = None\n \n- def to_dict(self) -> Dict[str, Any]:\n- \"\"\" Return keys and values in the form of a dictionary \"\"\"\n-\n- return dataclasses.asdict(self)\n-\n- def to_raw(self) -> Dict[str, Any]:\n- \"\"\" Return keys and values as if they originated from fmf node \"\"\"\n+ def to_spec(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a specification file \"\"\"\n \n return self.to_dict()\n \n @classmethod\n- def from_dict(cls, raw: _RawFmfId) -> 'FmfId':\n- \"\"\" Construct an :py:class:`FmfId` from given input container \"\"\"\n+ def from_spec(cls, raw: _RawFmfId) -> 'FmfId':\n+ \"\"\" Convert from a specification file or from a CLI option \"\"\"\n \n return FmfId(**{key: raw.get(key, None) for key in cls.keys})\n \n@@ -133,7 +128,7 @@ class FmfId(tmt.utils.SerializableContainer):\n # Simple asdict() is not good enough, fmf does not like keys that exist but are `None`.\n # Don't include those.\n fmf.base.Tree.node({\n- key: value for key, value in self.to_dict().items()\n+ key: value for key, value in self.items()\n if value is not None\n })\n except fmf.utils.GeneralError as error:\n@@ -421,7 +416,7 @@ class Core(tmt.utils.Common):\n # Links.__init__() method - it is tempting to use to_serialized()\n # and from_unserialized(), but we don't use unserialization code\n # when loading saved data back, so we can't go this way. 
Yet.\n- data[key] = cast('Links', value).to_raw()\n+ data[key] = cast('Links', value).to_spec()\n \n else:\n data[key] = value\n@@ -1593,7 +1588,7 @@ class Tree(tmt.utils.Common):\n filters = (filters or []) + list(Test._opt('filters', []))\n conditions = (conditions or []) + list(Test._opt('conditions', []))\n links = (links or []) + [\n- LinkNeedle.from_raw(value)\n+ LinkNeedle.from_spec(value)\n for value in cast(List[str], Test._opt('links', []))\n ]\n excludes = (excludes or []) + list(Test._opt('exclude', []))\n@@ -1650,7 +1645,7 @@ class Tree(tmt.utils.Common):\n filters = (filters or []) + list(Plan._opt('filters', []))\n conditions = (conditions or []) + list(Plan._opt('conditions', []))\n links = (links or []) + [\n- LinkNeedle.from_raw(value)\n+ LinkNeedle.from_spec(value)\n for value in cast(List[str], Plan._opt('links', []))\n ]\n excludes = (excludes or []) + list(Plan._opt('exclude', []))\n@@ -1685,7 +1680,7 @@ class Tree(tmt.utils.Common):\n filters = (filters or []) + list(Story._opt('filters', []))\n conditions = (conditions or []) + list(Story._opt('conditions', []))\n links = (links or []) + [\n- LinkNeedle.from_raw(value)\n+ LinkNeedle.from_spec(value)\n for value in cast(List[str], Story._opt('links', []))\n ]\n excludes = (excludes or []) + list(Story._opt('exclude', []))\n@@ -2479,9 +2474,9 @@ class LinkNeedle:\n target: str = r'.*'\n \n @classmethod\n- def from_raw(cls, value: str) -> 'LinkNeedle':\n+ def from_spec(cls, value: str) -> 'LinkNeedle':\n \"\"\"\n- Create a ``LinkNeedle`` instance from its specification.\n+ Convert from a specification file or from a CLI option\n \n Specification is described in [1], this constructor takes care\n of parsing it into a corresponding ``LinkNeedle`` instance.\n@@ -2521,7 +2516,7 @@ class LinkNeedle:\n \n \n @dataclasses.dataclass\n-class Link:\n+class Link(tmt.utils.SpecBasedContainer):\n \"\"\"\n An internal \"link\" as defined by tmt specification.\n \n@@ -2538,9 +2533,9 @@ class Link:\n note: Optional[str] = None\n \n @classmethod\n- def from_raw(cls, spec: _RawLink) -> 'Link':\n+ def from_spec(cls, spec: _RawLink) -> 'Link':\n \"\"\"\n- Create a ``Link`` instance from its specification.\n+ Convert from a specification file or from a CLI option\n \n Specification is described in [1], this constructor takes care\n of parsing it into a corresponding ``Link`` instance.\n@@ -2570,7 +2565,7 @@ class Link:\n if len(relations) == 0:\n return Link(\n relation=Link.DEFAULT_RELATIONSHIP,\n- target=FmfId.from_dict(cast(_RawFmfId, spec)),\n+ target=FmfId.from_spec(cast(_RawFmfId, spec)),\n note=note)\n \n # More relations than 1 are a hard error, only 1 is allowed.\n@@ -2599,25 +2594,25 @@ class Link:\n if isinstance(raw_target, str):\n return Link(relation=relation, target=raw_target, note=note)\n \n- return Link(relation=relation, target=FmfId.from_dict(raw_target), note=note)\n+ return Link(relation=relation, target=FmfId.from_spec(raw_target), note=note)\n \n- def to_raw(self) -> _RawLinkRelation:\n+ def to_spec(self) -> _RawLinkRelation:\n \"\"\"\n- Convert this link into a corresponding link specification.\n+ Convert to a form suitable for saving in a specification file\n \n No matter what the original specification was, every link will\n generate the very same type of specification, the ``relation: target``\n one.\n \n Output of this method is fully compatible with specification, and when\n- given to :py:meth:`from_raw`, it shall create a ``Link`` instance\n+ given to :py:meth:`from_spec`, it shall create a ``Link`` 
instance\n with the same properties as the original one.\n \n [1] https://tmt.readthedocs.io/en/stable/spec/core.html#link\n \"\"\"\n \n spec = {\n- self.relation: self.target.to_dict() if isinstance(\n+ self.relation: self.target.to_spec() if isinstance(\n self.target,\n FmfId) else self.target}\n \n@@ -2627,7 +2622,7 @@ class Link:\n return spec\n \n \n-class Links:\n+class Links(tmt.utils.SpecBasedContainer):\n \"\"\"\n Collection of links in tests, plans and stories.\n \n@@ -2668,11 +2663,11 @@ class Links:\n specs = data if isinstance(data, list) else [data]\n \n # Ensure that each link is in the canonical form\n- self._links = [Link.from_raw(spec) for spec in specs]\n+ self._links = [Link.from_spec(spec) for spec in specs]\n \n- def to_raw(self) -> List[_RawLinkRelation]:\n+ def to_spec(self) -> List[_RawLinkRelation]:\n \"\"\"\n- Convert this collection of links into a corresponding specification.\n+ Convert to a form suitable for saving in a specification file\n \n No matter what the original specification was, every link will\n generate the very same type of specification, the ``relation: target``\n@@ -2686,7 +2681,7 @@ class Links:\n \"\"\"\n \n return [\n- link.to_raw()\n+ link.to_spec()\n for link in self._links\n ]\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/__init__.py",
"new_path": "tmt/steps/__init__.py",
"diff": "@@ -85,7 +85,10 @@ T = TypeVar('T', bound='StepData')\n \n \n @dataclasses.dataclass\n-class StepData(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n+class StepData(\n+ tmt.utils.SpecBasedContainer,\n+ tmt.utils.NormalizeKeysMixin,\n+ tmt.utils.SerializableContainer):\n \"\"\"\n Keys necessary to describe, create, save and restore a step.\n \n@@ -110,13 +113,8 @@ class StepData(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n order: int = tmt.utils.DEFAULT_PLUGIN_ORDER\n summary: Optional[str] = None\n \n- def to_raw(self) -> _RawStepData:\n- \"\"\"\n- Serialize step data instance to a raw representation.\n-\n- The returned value can be used to recreate step data when given\n- to :py:meth:`from_raw`.\n- \"\"\"\n+ def to_spec(self) -> _RawStepData: # type: ignore[override]\n+ \"\"\" Convert to a form suitable for saving in a specification file \"\"\"\n \n return cast(_RawStepData, {\n tmt.utils.key_to_option(key): value\n@@ -135,10 +133,12 @@ class StepData(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n pass\n \n @classmethod\n- def from_raw(cls: Type[T], raw_data: _RawStepData, logger: tmt.utils.Common) -> T:\n- \"\"\"\n- Unserialize step data instance from its a raw representation.\n- \"\"\"\n+ def from_spec( # type: ignore[override]\n+ cls: Type[T],\n+ raw_data: _RawStepData,\n+ logger: tmt.utils.Common\n+ ) -> T:\n+ \"\"\" Convert from a specification file or from a CLI option \"\"\"\n \n cls.pre_normalization(raw_data, logger)\n \n@@ -406,7 +406,7 @@ class Step(tmt.utils.Common):\n # form for _normalize_data().\n if datum.how == how:\n self.debug(f' compatible: {datum}', level=4)\n- _raw_data.append(datum.to_raw())\n+ _raw_data.append(datum.to_spec())\n \n # Mismatch, throwing away, replacing with new `how` - but we can keep the name.\n else:\n@@ -718,7 +718,7 @@ class BasePlugin(Phase, metaclass=PluginIndex):\n # normalization in the process.\n if raw_data is not None:\n try:\n- data = plugin_data_class.from_raw(raw_data, step)\n+ data = plugin_data_class.from_spec(raw_data, step)\n \n except Exception as exc:\n raise tmt.utils.GeneralError(\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/discover/fmf.py",
"new_path": "tmt/steps/discover/fmf.py",
"diff": "@@ -439,7 +439,7 @@ class DiscoverFmf(tmt.steps.discover.DiscoverPlugin):\n \n # Check the 'test --link' option first, then from discover\n raw_link_needles = cast(List[str], tmt.Test._opt('links', []) or self.get('link', []))\n- link_needles = [tmt.base.LinkNeedle.from_raw(\n+ link_needles = [tmt.base.LinkNeedle.from_spec(\n raw_needle) for raw_needle in raw_link_needles]\n \n for link_needle in link_needles:\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/discover/shell.py",
"new_path": "tmt/steps/discover/shell.py",
"diff": "@@ -21,7 +21,10 @@ T = TypeVar('T', bound='TestDescription')\n \n \n @dataclasses.dataclass\n-class TestDescription(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContainer):\n+class TestDescription(\n+ tmt.utils.SpecBasedContainer,\n+ tmt.utils.NormalizeKeysMixin,\n+ tmt.utils.SerializableContainer):\n \"\"\"\n Keys necessary to describe a shell-based test.\n \n@@ -105,30 +108,40 @@ class TestDescription(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContai\n \n # Our own implementation, parent uses `name` and `how`, and tests don't have any `how`.\n @classmethod\n- def from_raw(cls: Type[T], raw_data: Dict[str, Any], logger: tmt.utils.Common) -> T:\n- \"\"\"\n- Unserialize step data instance from its a raw representation.\n- \"\"\"\n+ def from_spec( # type: ignore[override]\n+ cls: Type[T],\n+ raw_data: Dict[str, Any],\n+ logger: tmt.utils.Common\n+ ) -> T:\n+ \"\"\" Convert from a specification file or from a CLI option \"\"\"\n \n data = cls(name=raw_data['name'], test=raw_data['test'])\n data._load_keys(raw_data, cls.__name__, logger)\n \n return data\n \n- def to_raw(self) -> Dict[str, Any]:\n- data = super().to_dict()\n- data['link'] = self.link.to_raw() if self.link else None\n+ def to_spec(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a specification file \"\"\"\n+\n+ data = super().to_spec()\n+ data['link'] = self.link.to_spec() if self.link else None\n data['require'] = [\n- require if isinstance(require, str) else require.to_raw()\n+ require if isinstance(require, str) else require.to_spec()\n for require in self.require\n ]\n \n return data\n \n def to_serialized(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a file \"\"\"\n+\n data = super().to_serialized()\n \n- data['link'] = self.link.to_raw() if self.link else None\n+ # Using `to_spec()` on purpose: `Links` does not provide serialization\n+ # methods, because specification of links is already good enough. 
We\n+ # can use existing `to_spec()` method, and undo it with a simple\n+ # `Links(...)` call.\n+ data['link'] = self.link.to_spec() if self.link else None\n data['require'] = [\n require if isinstance(require, str) else require.to_serialized()\n for require in self.require\n@@ -138,6 +151,8 @@ class TestDescription(tmt.utils.NormalizeKeysMixin, tmt.utils.SerializableContai\n \n @classmethod\n def from_serialized(cls, serialized: Dict[str, Any]) -> 'TestDescription':\n+ \"\"\" Convert from a serialized form loaded from a file \"\"\"\n+\n obj = super().from_serialized(serialized)\n obj.link = tmt.base.Links(serialized['link'])\n obj.require = [\n@@ -154,9 +169,11 @@ class DiscoverShellData(tmt.steps.discover.DiscoverStepData):\n \n def _normalize_tests(self, value: List[Dict[str, Any]]\n ) -> List[TestDescription]:\n- return [TestDescription.from_raw(raw_datum, tmt.utils.Common()) for raw_datum in value]\n+ return [TestDescription.from_spec(raw_datum, tmt.utils.Common()) for raw_datum in value]\n \n def to_serialized(self) -> Dict[str, Any]:\n+ \"\"\" Convert to a form suitable for saving in a file \"\"\"\n+\n serialized = super().to_serialized()\n \n serialized['tests'] = [test.to_serialized() for test in self.tests]\n@@ -165,6 +182,8 @@ class DiscoverShellData(tmt.steps.discover.DiscoverStepData):\n \n @classmethod\n def from_serialized(cls, serialized: Dict[str, Any]) -> 'DiscoverShellData':\n+ \"\"\" Convert from a serialized form loaded from a file \"\"\"\n+\n obj = super().from_serialized(serialized)\n \n obj.tests = [TestDescription.from_serialized(\n@@ -250,7 +269,7 @@ class DiscoverShell(tmt.steps.discover.DiscoverPlugin):\n if dist_git_source:\n data.environment['TMT_SOURCE_DIR'] = sourcedir\n # Create a simple fmf node, adjust its name\n- tests.child(data.name, data.to_raw())\n+ tests.child(data.name, data.to_spec())\n \n # Symlink tests directory to the plan work tree\n testdir = os.path.join(self.workdir, \"tests\")\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -295,7 +295,7 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n raise tmt.utils.SpecificationError(\n f\"Value '{value}' cannot be converted to int for '{int_key}' attribute.\")\n \n- for key, value in data.to_dict().items():\n+ for key, value in data.items():\n if key == 'memory':\n self.info('memory', f\"{value} MB\", 'green')\n elif key == 'disk':\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/utils.py",
"new_path": "tmt/utils.py",
"diff": "@@ -1328,19 +1328,16 @@ def option_to_key(option: str) -> str:\n return option.replace('-', '_')\n \n \n-SerializableContainerDerivedType = TypeVar(\n- 'SerializableContainerDerivedType',\n- bound='SerializableContainer')\n-\n-\n @dataclasses.dataclass\n-class SerializableContainer:\n- \"\"\"\n- A mixin class for objects that may be saved in files and restored later\n- \"\"\"\n+class DataContainer:\n+ \"\"\" A base class for objects that have keys and values \"\"\"\n \n def to_dict(self) -> Dict[str, Any]:\n- \"\"\" Return keys and values in the form of a dictionary \"\"\"\n+ \"\"\"\n+ Convert to a mapping.\n+\n+ See :ref:`classes.rst` for more details.\n+ \"\"\"\n \n return dataclasses.asdict(self)\n \n@@ -1365,7 +1362,7 @@ class SerializableContainer:\n yield from self.to_dict().items()\n \n @classmethod\n- def default(cls, key: str, default: Any = None) -> Any:\n+ def _default(cls, key: str, default: Any = None) -> Any:\n \"\"\"\n Return a default value for a given key.\n \n@@ -1416,6 +1413,48 @@ class SerializableContainer:\n \n return True\n \n+\n+SpecBasedContainerT = TypeVar('SpecBasedContainerT', bound='SpecBasedContainer')\n+\n+\n+class SpecBasedContainer(DataContainer):\n+ @classmethod\n+ def from_spec(cls: Type[SpecBasedContainerT], spec: Any) -> SpecBasedContainerT:\n+ \"\"\"\n+ Convert from a specification file or from a CLI option\n+\n+ See :ref:`classes.rst` for more details.\n+\n+ See :py:meth:`to_spec` for its counterpart.\n+ \"\"\"\n+\n+ raise NotImplementedError()\n+\n+ def to_spec(self) -> Dict[str, Any]:\n+ \"\"\"\n+ Convert to a form suitable for saving in a specification file\n+\n+ See :ref:`classes.rst` for more details.\n+\n+ See :py:meth:`from_spec` for its counterpart.\n+ \"\"\"\n+\n+ return self.to_dict()\n+\n+\n+SerializableContainerDerivedType = TypeVar(\n+ 'SerializableContainerDerivedType',\n+ bound='SerializableContainer')\n+\n+\n+@dataclasses.dataclass\n+class SerializableContainer(DataContainer):\n+ \"\"\" A mixin class for saving and loading objects \"\"\"\n+\n+ @classmethod\n+ def default(cls, key: str, default: Any = None) -> Any:\n+ return cls._default(key, default=default)\n+\n #\n # Moving data between containers and objects owning them\n #\n@@ -1449,10 +1488,9 @@ class SerializableContainer:\n \n def to_serialized(self) -> Dict[str, Any]:\n \"\"\"\n- Return keys and values in the form allowing later reconstruction.\n+ Convert to a form suitable for saving in a file.\n \n- Used to transform container into a structure one can save in a\n- YAML file, and restore it later.\n+ See :ref:`classes.rst` for more details.\n \n See :py:meth:`from_serialized` for its counterpart.\n \"\"\"\n@@ -1474,10 +1512,9 @@ class SerializableContainer:\n cls: Type[SerializableContainerDerivedType],\n serialized: Dict[str, Any]) -> SerializableContainerDerivedType:\n \"\"\"\n- Recreate container from its serialized form.\n+ Convert from a serialized form loaded from a file.\n \n- Used to transform data read from a YAML file into the original\n- container.\n+ See :ref:`classes.rst` for more details.\n \n See :py:meth:`to_serialized` for its counterpart.\n \"\"\"\n@@ -1493,7 +1530,7 @@ class SerializableContainer:\n def unserialize(serialized: Dict[str, Any]\n ) -> SerializableContainerDerivedType:\n \"\"\"\n- Recreate container from its serialized form.\n+ Convert from a serialized form loaded from a file.\n \n Similar to :py:meth:`from_serialized`, but this method knows\n nothing about container's class, and will locate the correct\n@@ -1505,6 +1542,10 @@ class 
SerializableContainer:\n containers when their classes are not know to the code.\n Restoring such containers requires inspection of serialized data\n and dynamic imports of modules as needed.\n+\n+ See :ref:`classes.rst` for more details.\n+\n+ See :py:meth:`to_serialized` for its counterpart.\n \"\"\"\n \n from tmt.plugins import import_member\n"
}
] |
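A condensed sketch of the conversion families the new documentation describes; FmfIdSketch is a simplified stand-in for tmt.base.FmfId (only two of its keys), not the real class.

    import dataclasses
    from typing import Any, Dict, Optional

    @dataclasses.dataclass
    class FmfIdSketch:
        url: Optional[str] = None
        ref: Optional[str] = None

        def to_dict(self) -> Dict[str, Any]:
            # Plain key/value mapping; feeds the other conversions.
            return dataclasses.asdict(self)

        @classmethod
        def from_spec(cls, raw: Dict[str, Any]) -> "FmfIdSketch":
            # Build the object from user input (fmf file or CLI option).
            return cls(url=raw.get("url"), ref=raw.get("ref"))

        def to_spec(self) -> Dict[str, Any]:
            # For fmf ids the specification is just the mapping itself.
            return self.to_dict()

    fmf_id = FmfIdSketch.from_spec({"url": "https://example.com/repo.git", "ref": "main"})
    assert fmf_id.to_spec() == {"url": "https://example.com/repo.git", "ref": "main"}
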
134b710615faa45529757fd23eee012b2e8398a1 | teemtee/tmt | 08.12.2022 13:25:16 | MIT License | Drop various guest `wake()` methods in favor of parent class
They all do the same thing anyway: instantiate a guest class with the
given data, passing parent and name down the stream. A base class can do
all of this. | [
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/__init__.py",
"new_path": "tmt/steps/provision/__init__.py",
"diff": "@@ -1037,6 +1037,9 @@ class ProvisionPlugin(tmt.steps.GuestlessPlugin):\n # List of all supported methods aggregated from all plugins of the same step.\n _supported_methods: List[tmt.steps.Method] = []\n \n+ # TODO: Generics would provide a better type, https://github.com/teemtee/tmt/issues/1437\n+ _guest: Optional[Guest] = None\n+\n @classmethod\n def base_command(\n cls,\n@@ -1083,6 +1086,11 @@ class ProvisionPlugin(tmt.steps.GuestlessPlugin):\n \"\"\"\n super().wake()\n \n+ if data is not None:\n+ guest = self._guest_class(data=data, name=self.name, parent=self.step)\n+ guest.wake()\n+ self._guest = guest\n+\n def guest(self) -> Optional[Guest]:\n \"\"\"\n Return provisioned guest\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/artemis.py",
"new_path": "tmt/steps/provision/artemis.py",
"diff": "@@ -523,15 +523,6 @@ class ProvisionArtemis(tmt.steps.provision.ProvisionPlugin):\n ),\n ] + super().options(how)\n \n- # FIXME: ignore - https://github.com/teemtee/tmt/issues/1437\n- def wake(self, data: Optional[ArtemisGuestData] = None) -> None: # type: ignore[override]\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n-\n- super().wake(data=data)\n-\n- if data:\n- self._guest = GuestArtemis(data=data, name=self.name, parent=self.step)\n-\n def go(self) -> None:\n \"\"\" Provision the guest \"\"\"\n super().go()\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/connect.py",
"new_path": "tmt/steps/provision/connect.py",
"diff": "@@ -8,7 +8,6 @@ import tmt\n import tmt.steps\n import tmt.steps.provision\n import tmt.utils\n-from tmt.steps.provision import GuestSshData\n \n DEFAULT_USER = \"root\"\n \n@@ -79,13 +78,6 @@ class ProvisionConnect(tmt.steps.provision.ProvisionPlugin):\n help='Password for login into the guest system.'),\n ] + super().options(how)\n \n- # FIXME: ignore - https://github.com/teemtee/tmt/issues/1437\n- def wake(self, data: Optional[GuestSshData] = None) -> None: # type: ignore[override]\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n- if data:\n- self._guest = tmt.GuestSsh(data=data, name=self.name, parent=self.step)\n-\n def go(self) -> None:\n \"\"\" Prepare the connection \"\"\"\n super().go()\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/local.py",
"new_path": "tmt/steps/provision/local.py",
"diff": "@@ -121,12 +121,6 @@ class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):\n # Guest instance\n _guest = None\n \n- def wake(self, data: Optional[tmt.steps.provision.GuestData] = None) -> None:\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n- if data:\n- self._guest = GuestLocal(data=data, name=self.name, parent=self.step)\n-\n def go(self) -> None:\n \"\"\" Provision the container \"\"\"\n super().go()\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/podman.py",
"new_path": "tmt/steps/provision/podman.py",
"diff": "@@ -256,15 +256,6 @@ class ProvisionPodman(tmt.steps.provision.ProvisionPlugin):\n \n return super().default(option, default=default)\n \n- def wake(self, data: Optional[tmt.steps.provision.GuestData] = None) -> None:\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n- # Wake up podman instance\n- if data:\n- guest = GuestContainer(data=data, name=self.name, parent=self.step)\n- guest.wake()\n- self._guest = guest\n-\n def go(self) -> None:\n \"\"\" Provision the container \"\"\"\n super().go()\n"
},
{
"change_type": "MODIFY",
"old_path": "tmt/steps/provision/testcloud.py",
"new_path": "tmt/steps/provision/testcloud.py",
"diff": "@@ -615,17 +615,6 @@ class ProvisionTestcloud(tmt.steps.provision.ProvisionPlugin):\n help=\"What architecture to virtualize, host arch by default.\"),\n ] + super().options(how)\n \n- # FIXME: ignore - https://github.com/teemtee/tmt/issues/1437\n- def wake(self, data: Optional[TestcloudGuestData] = None) -> None: # type: ignore[override]\n- \"\"\" Wake up the plugin, process data, apply options \"\"\"\n- super().wake(data=data)\n-\n- # Wake up testcloud instance\n- if data:\n- guest = GuestTestcloud(data=data, name=self.name, parent=self.step)\n- guest.wake()\n- self._guest = guest\n-\n def go(self) -> None:\n \"\"\" Provision the testcloud instance \"\"\"\n super().go()\n"
}
] |
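The pattern the base class now implements, reduced to a few lines; the Sketch classes below are simplified stand-ins, not tmt's actual plugin hierarchy.

    from typing import Any, Optional, Type

    class GuestSketch:
        def __init__(self, data: Any, name: str, parent: Any) -> None:
            self.data, self.name, self.parent = data, name, parent

        def wake(self) -> None:
            pass  # recreate runtime state from the saved data

    class ProvisionPluginSketch:
        # Subclasses only point this at the guest type they provision.
        _guest_class: Type[GuestSketch] = GuestSketch
        _guest: Optional[GuestSketch] = None

        def __init__(self, name: str, step: Any = None) -> None:
            self.name, self.step = name, step

        def wake(self, data: Optional[Any] = None) -> None:
            # The body every plugin-specific wake() used to repeat.
            if data is not None:
                guest = self._guest_class(data=data, name=self.name, parent=self.step)
                guest.wake()
                self._guest = guest

    class ProvisionLocalSketch(ProvisionPluginSketch):
        _guest_class = GuestSketch  # e.g. GuestLocal in tmt itself

    plugin = ProvisionLocalSketch(name="default-0")
    plugin.wake(data={"role": None})
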
7b18ddcc6dd7f3f708e0681411033839abc0a203 | uma-pi1/kge | 14.08.2019 17:33:08 | MIT License | Fix resuming of SOBOL generator during ax search
This is done by fixing the seed and regenerating trials already performed. | [
{
"change_type": "MODIFY",
"old_path": "kge/job/auto_search.py",
"new_path": "kge/job/auto_search.py",
"diff": "@@ -113,10 +113,18 @@ class AutoSearchJob(SearchJob):\n self.trial_ids.append(trial_id)\n self.parameters.append(parameters)\n self.results.append(None)\n+ self.config.log(\n+ \"Created trial {} with parameters: {}\".format(\n+ trial_no, parameters\n+ )\n+ )\n else:\n # use the trial of a resumed run of this job\n parameters, trial_id = self.register_trial(self.parameters[trial_no])\n self.trial_ids.append(trial_id)\n+ self.config.log(\n+ \"Resumed trial {} with parameters: {}\".format(trial_no, parameters)\n+ )\n \n # create job for trial\n if trial_id is not None:\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/job/ax_search.py",
"new_path": "kge/job/ax_search.py",
"diff": "@@ -38,16 +38,19 @@ class AxSearchJob(AutoSearchJob):\n enforce_num_arms=True,\n ),\n GenerationStep(\n- model=Models.GPEI, num_arms=-1, recommended_max_parallelism=3,\n- model_gen_kwargs=\n- {'fixed_features':\n- ObservationFeatures(\n- parameters={\n- kv['name']:kv['value']\n- for kv in self.config.get(\"ax_search.fixed_parameters\")\n- }\n- )\n- }\n+ model=Models.GPEI,\n+ num_arms=-1,\n+ recommended_max_parallelism=3,\n+ model_gen_kwargs={\n+ \"fixed_features\": ObservationFeatures(\n+ parameters={\n+ kv[\"name\"]: kv[\"value\"]\n+ for kv in self.config.get(\n+ \"ax_search.fixed_parameters\"\n+ )\n+ }\n+ )\n+ },\n ),\n ],\n )\n@@ -61,33 +64,38 @@ class AxSearchJob(AutoSearchJob):\n objective_name=\"metric_value\",\n minimize=False,\n parameter_constraints=self.config.get(\"ax_search.parameter_constraints\"),\n-\n )\n self.config.log(\n- \"ax search initialized with {}\".format(\n- self.ax_client.generation_strategy\n- )\n+ \"ax search initialized with {}\".format(self.ax_client.generation_strategy)\n )\n \n- # By default, ax first uses a Sobol strategy for a certain number of arms,\n- # and is maybe followed by Bayesian Optimization. If we resume this job,\n- # some of the Sobol arms may have already been generated. The corresponding\n- # arms will be registered later (when this job's run method is executed),\n- # but here we already change the generation strategy to take account of\n- # these configurations.\n- num_generated = len(self.parameters)\n- if num_generated > 0:\n- old_curr = self.ax_client.generation_strategy._curr\n- new_num_arms = max(0, old_curr.num_arms - num_generated)\n- new_curr = old_curr._replace(num_arms=new_num_arms)\n- self.ax_client.generation_strategy._curr = new_curr\n- self.config.log(\n- \"Reduced number of arms for first generation step of \"\n- + \"ax_client from {} to {} due to prior data.\".format(\n- old_curr.num_arms, new_curr.num_arms\n- )\n+ # Make sure sobol models are resumed correctly\n+ if self.ax_client.generation_strategy._curr.model == Models.SOBOL:\n+ # Fix seed for sobol. We do this by generating the model right away (instead\n+ # of automatically once first trial is generated).\n+ self.ax_client.generation_strategy._set_current_model(\n+ experiment=self.ax_client.experiment, data=None, seed=0\n )\n \n+ # Regenerate and drop SOBOL arms already generated. Since we fixed the seed,\n+ # we will skip exactly the arms already generated in the job being resumed.\n+ num_generated = len(self.parameters)\n+ if num_generated > 0:\n+ num_sobol_generated = min(\n+ self.ax_client.generation_strategy._curr.num_arms, num_generated\n+ )\n+ for i in range(num_sobol_generated):\n+ generator_run = self.ax_client.generation_strategy.gen(\n+ experiment=self.ax_client.experiment\n+ )\n+ # self.config.log(\"Skipped parameters: {}\".format(generator_run.arms))\n+ self.config.log(\n+ \"Skipped {} of {} Sobol trials due to prior data.\".format(\n+ num_sobol_generated,\n+ self.ax_client.generation_strategy._curr.num_arms,\n+ )\n+ )\n+\n def register_trial(self, parameters=None):\n trial_id = None\n try:\n"
}
] |
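The resume trick in isolation: with a fixed seed, re-drawing the already-evaluated trials puts the generator exactly where the interrupted job left off. The sketch below uses Python's plain pseudo-random generator instead of Ax's Sobol model, so the numbers differ, but the skipping logic is the same.

    import random
    from typing import List

    def remaining_trials(num_already_generated: int, num_total: int,
                         dimensions: int = 2, seed: int = 0) -> List[List[float]]:
        rng = random.Random(seed)
        # Regenerate and drop the trials the previous run already produced.
        for _ in range(num_already_generated):
            [rng.random() for _ in range(dimensions)]
        # The generator now continues the very same sequence.
        return [[rng.random() for _ in range(dimensions)]
                for _ in range(num_total - num_already_generated)]

    first_run = remaining_trials(0, 10)
    resumed = remaining_trials(3, 10)
    assert resumed == first_run[3:]
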
70e2d9a7f62bfc56ea2469cc25384ced00e5741e | uma-pi1/kge | 11.12.2019 16:28:21 | MIT License | Support for dataset-specific configurations and metadata
preprocess now creates a dataset.yaml file with information about
the dataset. | [
{
"change_type": "MODIFY",
"old_path": "data/download_all.sh",
"new_path": "data/download_all.sh",
"diff": "@@ -13,7 +13,7 @@ if [ ! -d \"$BASEDIR/toy\" ]; then\n cd $BASEDIR\n curl -O https://www.uni-mannheim.de/media/Einrichtungen/dws/pi1/kge_datasets/toy.tar.gz\n tar xvf toy.tar.gz\n- python preprocess.py --folder toy\n+ python preprocess.py toy\n else\n echo toy already present\n fi\n@@ -39,7 +39,7 @@ if [ ! -d \"$BASEDIR/fb15k\" ]; then\n ;;\n esac\n cd ..\n- python preprocess.py --folder fb15k\n+ python preprocess.py fb15k\n else\n echo fb15k already present\n fi\n@@ -50,7 +50,7 @@ if [ ! -d \"$BASEDIR/fb15k-237\" ]; then\n cd $BASEDIR\n curl -O https://www.uni-mannheim.de/media/Einrichtungen/dws/pi1/kge_datasets/fb15k-237.tar.gz\n tar xvf fb15k-237.tar.gz\n- python preprocess.py --folder fb15k-237\n+ python preprocess.py fb15k-237\n else\n echo fb15k-237 already present\n fi\n@@ -75,7 +75,7 @@ if [ ! -d \"$BASEDIR/wn18\" ]; then\n ;;\n esac\n cd ..\n- python preprocess.py --folder wn18\n+ python preprocess.py wn18\n else\n echo wn18 already present\n fi\n@@ -86,7 +86,7 @@ if [ ! -d \"$BASEDIR/wnrr\" ]; then\n cd $BASEDIR\n curl -O https://www.uni-mannheim.de/media/Einrichtungen/dws/pi1/kge_datasets/wnrr.tar.gz\n tar xvf wnrr.tar.gz\n- python preprocess.py --folder wnrr\n+ python preprocess.py wnrr\n else\n echo wnrr already present\n fi\n@@ -98,7 +98,7 @@ if [ ! -d \"$BASEDIR/dbpedia50\" ]; then\n cd $BASEDIR\n curl -O https://www.uni-mannheim.de/media/Einrichtungen/dws/pi1/kge_datasets/dbpedia50.tar.gz\n tar xvf dbpedia50.tar.gz\n- python preprocess.py --folder dbpedia50\n+ python preprocess.py dbpedia50\n else\n echo dbpedia50 already present\n fi\n@@ -109,7 +109,7 @@ if [ ! -d \"$BASEDIR/dbpedia500\" ]; then\n cd $BASEDIR\n curl -O https://www.uni-mannheim.de/media/Einrichtungen/dws/pi1/kge_datasets/dbpedia500.tar.gz\n tar xvf dbpedia500.tar.gz\n- python preprocess.py --folder dbpedia500 --order_sop\n+ python preprocess.py dbpedia500 --order_sop\n else\n echo dbpedia500 already present\n fi\n@@ -134,7 +134,7 @@ if [ ! -d \"$BASEDIR/db100k\" ]; then\n ;;\n esac\n cd ..\n- python preprocess.py --folder db100k\n+ python preprocess.py db100k\n else\n echo db100k already present\n fi\n@@ -145,7 +145,7 @@ if [ ! -d \"$BASEDIR/yago3-10\" ]; then\n cd $BASEDIR\n curl -O https://www.uni-mannheim.de/media/Einrichtungen/dws/pi1/kge_datasets/yago3-10.tar.gz\n tar xvf yago3-10.tar.gz\n- python preprocess.py --folder yago3-10\n+ python preprocess.py yago3-10\n else\n echo yago3-10 already present\n fi\n"
},
{
"change_type": "MODIFY",
"old_path": "data/preprocess.py",
"new_path": "data/preprocess.py",
"diff": "@@ -1,65 +1,103 @@\n+#!/usr/bin/env python\n+\"\"\"Preprocess a KGE dataset into a the format expected by libkge.\n+\n+Call as `preprocess.py --folder <name>`. The original dataset should be stored in\n+subfolder `name` and have files \"train.txt\", \"valid.txt\", and \"test.txt\". Each file\n+contains one SPO triple per line, separated by tabs.\n+\n+During preprocessing, each distinct entity name and each distinct distinct relation name\n+is assigned an index (dense). The index-to-object mapping is stored in files\n+\"entity_map.del\" and \"relation_map.del\", resp. The triples (as indexes) are stored in\n+files \"train.del\", \"valid.del\", and \"test.del\". Metadata information is stored in a file\n+\"dataset.yaml\".\n+\n+\"\"\"\n+\n import argparse\n+import yaml\n+import os.path\n+from collections import OrderedDict\n \n \n-def index(symbols, file):\n- with open(file, \"w\") as f:\n- for i, k in symbols.items():\n- f.write(str(k) + \"\\t\" + str(i) + \"\\n\")\n+def store_map(symbol_map, filename):\n+ with open(filename, \"w\") as f:\n+ for symbol, index in symbol_map.items():\n+ f.write(f\"{index}\\t{symbol}\\n\")\n \n \n if __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n- parser.add_argument(\"--folder\", type=str)\n+ parser.add_argument(\"folder\", type=str)\n parser.add_argument(\"--order_sop\", action=\"store_true\")\n args = parser.parse_args()\n \n- print(\"Preprocessing \" + args.folder)\n+ print(f\"Preprocessing {args.folder}...\")\n raw_split_files = {\"train\": \"train.txt\", \"valid\": \"valid.txt\", \"test\": \"test.txt\"}\n split_files = {\"train\": \"train.del\", \"valid\": \"valid.del\", \"test\": \"test.del\"}\n+ split_sizes = {}\n \n- sub = 0\n- rel = 1\n- obj = 2\n if args.order_sop:\n- obj = 1\n- rel = 2\n+ S, P, O = 0, 2, 1\n+ else:\n+ S, P, O = 0, 1, 2\n \n- # read data and collect entity and relation names\n+ # read data and collect entities and relations\n raw = {}\n entities = {}\n relations = {}\n ent_id = 0\n rel_id = 0\n- for k, file in raw_split_files.items():\n- with open(args.folder + \"/\" + file, \"r\") as f:\n- raw[k] = list(map(lambda s: s.strip().split(\"\\t\"), f.readlines()))\n- for t in raw[k]:\n- if t[sub] not in entities:\n- entities[t[sub]] = ent_id\n+ for split, filename in raw_split_files.items():\n+ with open(args.folder + \"/\" + filename, \"r\") as f:\n+ raw[split] = list(map(lambda s: s.strip().split(\"\\t\"), f.readlines()))\n+ for t in raw[split]:\n+ if t[S] not in entities:\n+ entities[t[S]] = ent_id\n ent_id += 1\n- if t[rel] not in relations:\n- relations[t[rel]] = rel_id\n+ if t[P] not in relations:\n+ relations[t[P]] = rel_id\n rel_id += 1\n- if t[obj] not in entities:\n- entities[t[obj]] = ent_id\n+ if t[O] not in entities:\n+ entities[t[O]] = ent_id\n ent_id += 1\n- print(str(len(raw[k])) + \" triples in \" + file)\n+ print(\n+ f\"Found {len(raw[split])} triples in {split} split \"\n+ f\"(file: {filename}).\"\n+ )\n+ split_sizes[split + \"_size\"] = len(raw[split])\n+\n+ print(f\"{len(relations)} distinct relations\")\n+ print(f\"{len(entities)} distinct entities\")\n+ print(\"Writing relation and entity map...\")\n+ store_map(relations, os.path.join(args.folder, \"relation_map.del\"))\n+ store_map(entities, os.path.join(args.folder, \"entity_map.del\"))\n+ print(\"Done.\")\n \n- print(str(len(relations)) + \" distinct relations\")\n- print(str(len(entities)) + \" distinct entities\")\n- print(\"Writing indexes...\")\n- index(relations, args.folder + \"/relation_map.del\")\n- index(entities, args.folder + 
\"/entity_map.del\")\n+ # write config\n+ print(\"Writing dataset.yaml...\")\n+ dataset_config = dict(\n+ name=args.folder,\n+ entity_map=\"entity_map.del\",\n+ relation_map=\"relation_map.del\",\n+ num_entities=len(entities),\n+ num_relations=len(relations),\n+ **split_files,\n+ **split_sizes,\n+ )\n+ print(yaml.dump(dict(dataset=dataset_config)))\n+ with open(os.path.join(args.folder, \"dataset.yaml\"), \"w+\") as filename:\n+ filename.write(yaml.dump(dict(dataset=dataset_config)))\n \n- # write out\n+ # write out triples using indexes\n print(\"Writing triples...\")\n- for k, file in split_files.items():\n- with open(args.folder + \"/\" + file, \"w\") as f:\n- for t in raw[k]:\n- f.write(str(entities[t[sub]]))\n- f.write(\"\\t\")\n- f.write(str(relations[t[rel]]))\n- f.write(\"\\t\")\n- f.write(str(entities[t[obj]]))\n- f.write(\"\\n\")\n- print(\"Done\")\n+ for split, filename in split_files.items():\n+ with open(os.path.join(args.folder, filename), \"w\") as f:\n+ for t in raw[split]:\n+ f.write(\n+ str(entities[t[S]])\n+ + \"\\t\"\n+ + str(relations[t[P]])\n+ + \"\\t\"\n+ + str(entities[t[O]])\n+ + \"\\n\"\n+ )\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/config-default.yaml",
"new_path": "kge/config-default.yaml",
"diff": "@@ -19,6 +19,8 @@ random_seed:\n \n dataset:\n # Specify a dataset here. There must be a folder of that name under \"data/\".\n+ # If this folder contains a dataset.yaml file, it overrides the defaults\n+ # specified below.\n name: 'toy'\n \n # Names of the training, validation, and test data files. Each file has the\n@@ -39,6 +41,8 @@ dataset:\n entity_map: entity_map.del\n relation_map: relation_map.del\n \n+ # other dataset specific keys\n+ +++: +++\n \n ## MODEL #######################################################################\n \n"
},
{
"change_type": "MODIFY",
"old_path": "kge/dataset.py",
"new_path": "kge/dataset.py",
"diff": "@@ -5,11 +5,12 @@ from collections import defaultdict, OrderedDict\n import torch\n import numpy as np\n \n+from kge import Config, Configurable\n from kge.util.misc import kge_base_dir\n \n \n # TODO add support to pickle dataset (and indexes) and reload from there\n-class Dataset:\n+class Dataset(Configurable):\n def __init__(\n self,\n config,\n@@ -24,7 +25,7 @@ class Dataset:\n test,\n test_meta,\n ):\n- self.config = config\n+ super().__init__(config, \"dataset\")\n self.num_entities = num_entities\n self.entities = entities # array: entity index -> metadata array of strings\n self.num_relations = num_relations\n@@ -44,11 +45,14 @@ class Dataset:\n self.indexes = {} # map: name of index -> index (used mainly by training jobs)\n \n @staticmethod\n- def load(config):\n+ def load(config: Config):\n name = config.get(\"dataset.name\")\n- config.log(\"Loading dataset \" + name + \"...\")\n- base_dir = os.path.join(kge_base_dir(), \"data/\" + name)\n+ base_dir = os.path.join(kge_base_dir(), \"data\", name)\n+ if os.path.isfile(os.path.join(base_dir, \"dataset.yaml\")):\n+ config.log(\"Loading configuration of dataset \" + name + \"...\")\n+ config.load(os.path.join(base_dir, \"dataset.yaml\"))\n \n+ config.log(\"Loading dataset \" + name + \"...\")\n num_entities, entities = Dataset._load_map(\n os.path.join(base_dir, config.get(\"dataset.entity_map\"))\n )\n"
}
] |
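The metadata file written by the new preprocessing step has roughly this shape; the numbers below are illustrative placeholders, not the real statistics of any dataset.

    import yaml

    dataset_config = dict(
        name="toy",
        entity_map="entity_map.del",
        relation_map="relation_map.del",
        num_entities=280,
        num_relations=112,
        train="train.del",
        valid="valid.del",
        test="test.del",
        train_size=4565,
        valid_size=109,
        test_size=152,
    )

    # preprocess.py dumps this under a top-level "dataset" key so the file can
    # be merged straight into the job configuration.
    print(yaml.dump(dict(dataset=dataset_config)))
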
bd4553f1952b25ceaf82e62eb525a142b83714dc | uma-pi1/kge | 18.12.2019 19:35:57 | MIT License | Add support for relation prediction
Added a score_so method to KgeModel for this purpose and a default
implementation in RelationalScorer (which is currently used by all models). | [
{
"change_type": "MODIFY",
"old_path": "kge/model/complex.py",
"new_path": "kge/model/complex.py",
"diff": "@@ -39,7 +39,7 @@ class ComplExScorer(RelationalScorer):\n elif combine == \"*po\":\n out = (r_all * o_all).mm(s_all.transpose(0, 1))\n else:\n- raise ValueError('cannot handle combine=\"{}\".format(combine)')\n+ return super().score_emb(s_emb, p_emb, o_emb, combine)\n \n return out.view(n, -1)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/distmult.py",
"new_path": "kge/model/distmult.py",
"diff": "@@ -21,7 +21,7 @@ class DistMultScorer(RelationalScorer):\n elif combine == \"*po\":\n out = (o_emb * p_emb).mm(s_emb.transpose(0, 1))\n else:\n- raise ValueError('cannot handle combine=\"{}\".format(combine)')\n+ return super().score_emb(s_emb, p_emb, o_emb, combine)\n \n return out.view(n, -1)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/experimental/freex.py",
"new_path": "kge/model/experimental/freex.py",
"diff": "@@ -27,7 +27,7 @@ class FreexScorer(RelationalScorer):\n elif combine == \"*po\":\n out = (p_emb * o_all).mm(s_all.transpose(0, 1))\n else:\n- raise ValueError('cannot handle combine=\"{}\".format(combine)')\n+ return super().score_emb(s_emb, p_emb, o_emb, combine)\n \n return out.view(n, -1)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/kge_model.py",
"new_path": "kge/model/kge_model.py",
"diff": "@@ -152,6 +152,14 @@ class RelationalScorer(KgeBase):\n p_embs = p_emb.repeat_interleave(n_s, 0)\n o_embs = o_emb.repeat_interleave(n_s, 0)\n out = self.score_emb_spo(s_embs, p_embs, o_embs)\n+ elif combine == \"s*o\":\n+ n = s_emb.size(0)\n+ assert o_emb.size(0) == n\n+ n_p = p_emb.size(0)\n+ s_embs = s_emb.repeat_interleave(n_p, 0)\n+ p_embs = p_emb.repeat((n,1))\n+ o_embs = o_emb.repeat_interleave(n_p, 0)\n+ out = self.score_emb_spo(s_embs, p_embs, o_embs)\n else:\n raise ValueError('cannot handle combine=\"{}\".format(combine)')\n \n@@ -372,7 +380,7 @@ class KgeModel(KgeBase):\n \n @staticmethod\n def load_from_checkpoint(\n- filename: str, dataset=None, use_tmp_log_folder=True, device=None\n+ filename: str, dataset=None, use_tmp_log_folder=True, device=None\n ) -> \"KgeModel\":\n \"\"\"Loads a model from a checkpoint file of a training job.\n \n@@ -393,6 +401,7 @@ class KgeModel(KgeBase):\n config.set(\"job.device\", device)\n if use_tmp_log_folder:\n import tempfile\n+\n config.log_folder = tempfile.mkdtemp(prefix=\"kge-\")\n if dataset is None:\n dataset = Dataset.load(config)\n@@ -502,6 +511,28 @@ class KgeModel(KgeBase):\n \n return self._scorer.score_emb(s, p, o, combine=\"*po\")\n \n+ def score_so(self, s: Tensor, o: Tensor, p: Tensor = None) -> Tensor:\n+ r\"\"\"Compute scores for triples formed from a set of so-pairs and all (or a subset of the) relations.\n+\n+ `s` and `o` are vectors of common size :math:`n`, holding the indexes of the\n+ subjects and objects to score.\n+\n+ Returns an :math:`n\\times R` tensor, where :math:`R` is the total number of\n+ known relations. The :math:`(i,j)`-entry holds the score for triple :math:`(s_i,\n+ j, o_i)`.\n+\n+ If `p` is not None, it is a vector holding the indexes of the relations to score.\n+\n+ \"\"\"\n+ s = self.get_s_embedder().embed(s)\n+ o = self.get_o_embedder().embed(o)\n+ if p is None:\n+ p = self.get_p_embedder().embed_all()\n+ else:\n+ p = self.get_p_embedder().embed(p)\n+\n+ return self._scorer.score_emb(s, p, o, combine=\"s*o\")\n+\n def score_sp_po(\n self, s: Tensor, p: Tensor, o: Tensor, entity_subset: Tensor = None\n ) -> Tensor:\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/reciprocal_relations_model.py",
"new_path": "kge/model/reciprocal_relations_model.py",
"diff": "@@ -71,8 +71,16 @@ class ReciprocalRelationsModel(KgeModel):\n o = self.get_o_embedder().embed(o)\n return self._scorer.score_emb(o, p, s, combine=\"sp*\")\n \n- def score_sp_po(self, s: torch.Tensor, p: torch.Tensor, o: torch.Tensor,\n- entity_subset: torch.Tensor = None) -> torch.Tensor:\n+ def score_so(self, s, o, p=None):\n+ raise Exception(\"The reciprocal relations model cannot score relations.\")\n+\n+ def score_sp_po(\n+ self,\n+ s: torch.Tensor,\n+ p: torch.Tensor,\n+ o: torch.Tensor,\n+ entity_subset: torch.Tensor = None,\n+ ) -> torch.Tensor:\n s = self.get_s_embedder().embed(s)\n p_inv = self.get_p_embedder().embed(p + self.dataset.num_relations)\n p = self.get_p_embedder().embed(p)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/rescal.py",
"new_path": "kge/model/rescal.py",
"diff": "@@ -48,7 +48,7 @@ class RescalScorer(RelationalScorer):\n .mm(s_emb.transpose(0, 1))\n )\n else:\n- raise ValueError('cannot handle combine=\"{}\".format(combine)')\n+ super().score_emb(s_emb, p_emb, o_emb, combine)\n \n return out.view(batch_size, -1)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/transe.py",
"new_path": "kge/model/transe.py",
"diff": "@@ -30,7 +30,7 @@ class TransEScorer(RelationalScorer):\n # for i in range(n):\n # out[i, :] = -F.pairwise_distance(po_emb[i, :], s_emb, p=self._norm)\n else:\n- raise ValueError('cannot handle combine=\"{}\".format(combine)')\n+ super().score_emb(s_emb, p_emb, o_emb, combine)\n return out.view(n, -1)\n \n \n"
}
] |
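What the new s*o combination does with the embeddings, in isolation: each of the n subject-object pairs is scored against all relations, giving an n x R score matrix. The triple scorer below is a DistMult-style stand-in, only there to make the reshaping concrete; any score_emb_spo would do.

    import torch

    def score_s_o(s_emb: torch.Tensor, p_emb: torch.Tensor,
                  o_emb: torch.Tensor) -> torch.Tensor:
        n, n_p = s_emb.size(0), p_emb.size(0)
        s_all = s_emb.repeat_interleave(n_p, 0)  # (n * n_p, dim)
        p_all = p_emb.repeat((n, 1))             # (n * n_p, dim)
        o_all = o_emb.repeat_interleave(n_p, 0)  # (n * n_p, dim)
        # Stand-in triple scorer (DistMult-style element-wise product).
        scores = (s_all * p_all * o_all).sum(dim=1)
        return scores.view(n, n_p)

    s = torch.randn(4, 8)   # 4 so-pairs, embedding size 8
    p = torch.randn(5, 8)   # 5 relations
    o = torch.randn(4, 8)
    assert score_s_o(s, p, o).shape == (4, 5)
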
056ad38583782f9315eed3d6aa607d5d7be2e438 | uma-pi1/kge | 26.03.2020 11:10:12 | MIT License | Rename queries sp*/s*o/*po to sp_/s_o/_po
This makes them valid identifiers, which allows query types to be used
as configuration keys (for example). | [
{
"change_type": "MODIFY",
"old_path": "kge/job/train.py",
"new_path": "kge/job/train.py",
"diff": "@@ -935,7 +935,7 @@ class TrainingJobNegativeSampling(TrainingJob):\n \n \n class TrainingJob1vsAll(TrainingJob):\n- \"\"\"Samples SPO pairs and queries sp* and *po, treating all other entities as negative.\"\"\"\n+ \"\"\"Samples SPO pairs and queries sp_ and _po, treating all other entities as negative.\"\"\"\n \n def __init__(self, config, dataset, parent_job=None):\n super().__init__(config, dataset, parent_job)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/complex.py",
"new_path": "kge/model/complex.py",
"diff": "@@ -34,9 +34,9 @@ class ComplExScorer(RelationalScorer):\n \n if combine == \"spo\":\n out = (s_all * o_all * r_all).sum(dim=1)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n out = (s_all * r_all).mm(o_all.transpose(0, 1))\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n out = (r_all * o_all).mm(s_all.transpose(0, 1))\n else:\n return super().score_emb(s_emb, p_emb, o_emb, combine)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/conve.py",
"new_path": "kge/model/conve.py",
"diff": "@@ -83,7 +83,7 @@ class ConvEScorer(RelationalScorer):\n out = self.projection_dropout(out)\n out = self.bn2(out)\n out = self.non_linear(out)\n- if combine == \"sp*\":\n+ if combine == \"sp_\":\n out = torch.mm(out, o_emb[:, 1:].transpose(1, 0))\n elif combine == \"spo\":\n out = (out * o_emb[:, 1:]).sum(-1)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/cp.py",
"new_path": "kge/model/cp.py",
"diff": "@@ -21,9 +21,9 @@ class CPScorer(RelationalScorer):\n \n if combine == \"spo\":\n out = (s_emb_h * p_emb * o_emb_t).sum(dim=1)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n out = (s_emb_h * p_emb).mm(o_emb_t.transpose(0, 1))\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n out = (o_emb_t * p_emb).mm(s_emb_h.transpose(0, 1))\n else:\n return super().score_emb(s_emb, p_emb, o_emb, combine)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/distmult.py",
"new_path": "kge/model/distmult.py",
"diff": "@@ -16,9 +16,9 @@ class DistMultScorer(RelationalScorer):\n \n if combine == \"spo\":\n out = (s_emb * p_emb * o_emb).sum(dim=1)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n out = (s_emb * p_emb).mm(o_emb.transpose(0, 1))\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n out = (o_emb * p_emb).mm(s_emb.transpose(0, 1))\n else:\n return super().score_emb(s_emb, p_emb, o_emb, combine)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/kge_model.py",
"new_path": "kge/model/kge_model.py",
"diff": "@@ -112,7 +112,7 @@ class RelationalScorer(KgeBase):\n :math:`d_r` are the sizes of the entity and relation embeddings, respectively.\n \n The provided embeddings are combined based on the value of `combine`. Common\n- values are :code:`\"spo\"`, :code:`\"sp*\"`, and :code:`\"*po\"`. Not all models may\n+ values are :code:`\"spo\"`, :code:`\"sp_\"`, and :code:`\"_po\"`. Not all models may\n support all combinations.\n \n When `combine` is :code:`\"spo\"`, then embeddings are combined row-wise. In this\n@@ -121,13 +121,13 @@ class RelationalScorer(KgeBase):\n which the :math:`i`-th entry holds the score of the embedding triple\n :math:`(s_i, p_i, o_i)`.\n \n- When `combine` is :code:`\"sp*\"`, the subjects and predicates are taken row-wise\n+ When `combine` is :code:`\"sp_\"`, the subjects and predicates are taken row-wise\n and subsequently combined with all objects. In this case, it is required that\n :math:`n_s=n_p=n`. The output is a :math`n\\times n_o` tensor, in which the\n :math:`(i,j)`-th entry holds the score of the embedding triple :math:`(s_i, p_i,\n o_j)`.\n \n- When `combine` is :code:`\"*po\"`, predicates and objects are taken row-wise and\n+ When `combine` is :code:`\"_po\"`, predicates and objects are taken row-wise and\n subsequently combined with all subjects. In this case, it is required that\n :math:`n_p=n_o=n`. The output is a :math`n\\times n_s` tensor, in which the\n :math:`(i,j)`-th entry holds the score of the embedding triple :math:`(s_j, p_i,\n@@ -139,21 +139,21 @@ class RelationalScorer(KgeBase):\n if combine == \"spo\":\n assert s_emb.size(0) == n and o_emb.size(0) == n\n out = self.score_emb_spo(s_emb, p_emb, o_emb)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n assert s_emb.size(0) == n\n n_o = o_emb.size(0)\n s_embs = s_emb.repeat_interleave(n_o, 0)\n p_embs = p_emb.repeat_interleave(n_o, 0)\n o_embs = o_emb.repeat((n, 1))\n out = self.score_emb_spo(s_embs, p_embs, o_embs)\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n assert o_emb.size(0) == n\n n_s = s_emb.size(0)\n s_embs = s_emb.repeat((n, 1))\n p_embs = p_emb.repeat_interleave(n_s, 0)\n o_embs = o_emb.repeat_interleave(n_s, 0)\n out = self.score_emb_spo(s_embs, p_embs, o_embs)\n- elif combine == \"s*o\":\n+ elif combine == \"s_o\":\n n = s_emb.size(0)\n assert o_emb.size(0) == n\n n_p = p_emb.size(0)\n@@ -491,7 +491,7 @@ class KgeModel(KgeBase):\n else:\n o = self.get_o_embedder().embed(o)\n \n- return self._scorer.score_emb(s, p, o, combine=\"sp*\")\n+ return self._scorer.score_emb(s, p, o, combine=\"sp_\")\n \n def score_po(self, p: Tensor, o: Tensor, s: Tensor = None) -> Tensor:\n r\"\"\"Compute scores for triples formed from a set of po-pairs and (or a subset of the) subjects.\n@@ -514,7 +514,7 @@ class KgeModel(KgeBase):\n o = self.get_o_embedder().embed(o)\n p = self.get_p_embedder().embed(p)\n \n- return self._scorer.score_emb(s, p, o, combine=\"*po\")\n+ return self._scorer.score_emb(s, p, o, combine=\"_po\")\n \n def score_so(self, s: Tensor, o: Tensor, p: Tensor = None) -> Tensor:\n r\"\"\"Compute scores for triples formed from a set of so-pairs and all (or a subset of the) relations.\n@@ -536,7 +536,7 @@ class KgeModel(KgeBase):\n else:\n p = self.get_p_embedder().embed(p)\n \n- return self._scorer.score_emb(s, p, o, combine=\"s*o\")\n+ return self._scorer.score_emb(s, p, o, combine=\"s_o\")\n \n def score_sp_po(\n self, s: Tensor, p: Tensor, o: Tensor, entity_subset: Tensor = None\n@@ -567,8 +567,8 @@ class KgeModel(KgeBase):\n all_entities = 
self.get_s_embedder().embed(entity_subset)\n else:\n all_entities = self.get_s_embedder().embed_all()\n- sp_scores = self._scorer.score_emb(s, p, all_entities, combine=\"sp*\")\n- po_scores = self._scorer.score_emb(all_entities, p, o, combine=\"*po\")\n+ sp_scores = self._scorer.score_emb(s, p, all_entities, combine=\"sp_\")\n+ po_scores = self._scorer.score_emb(all_entities, p, o, combine=\"_po\")\n else:\n if entity_subset is not None:\n all_objects = self.get_o_embedder().embed(entity_subset)\n@@ -576,6 +576,6 @@ class KgeModel(KgeBase):\n else:\n all_objects = self.get_o_embedder().embed_all()\n all_subjects = self.get_s_embedder().embed_all()\n- sp_scores = self._scorer.score_emb(s, p, all_objects, combine=\"sp*\")\n- po_scores = self._scorer.score_emb(all_subjects, p, o, combine=\"*po\")\n+ sp_scores = self._scorer.score_emb(s, p, all_objects, combine=\"sp_\")\n+ po_scores = self._scorer.score_emb(all_subjects, p, o, combine=\"_po\")\n return torch.cat((sp_scores, po_scores), dim=1)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/reciprocal_relations_model.py",
"new_path": "kge/model/reciprocal_relations_model.py",
"diff": "@@ -8,8 +8,8 @@ class ReciprocalRelationsModel(KgeModel):\n \"\"\"Modifies a base model to use different relation embeddings for predicting subject and object.\n \n This implements the reciprocal relations training procedure of [TODO cite ConvE].\n- Note that this model cannot be used to score a single triple, but only to rank sp*\n- or *po questions.\n+ Note that this model cannot be used to score a single triple, but only to rank sp_\n+ or _po questions.\n \n \"\"\"\n \n@@ -58,7 +58,7 @@ class ReciprocalRelationsModel(KgeModel):\n s = self.get_s_embedder().embed(s)\n p = self.get_p_embedder().embed(p + self.dataset.num_relations())\n o = self.get_o_embedder().embed(o)\n- return self._scorer.score_emb(o, p, s, combine=\"sp*\")\n+ return self._scorer.score_emb(o, p, s, combine=\"sp_\")\n \n def score_so(self, s, o, p=None):\n raise Exception(\"The reciprocal relations model cannot score relations.\")\n@@ -79,8 +79,8 @@ class ReciprocalRelationsModel(KgeModel):\n all_entities = self.get_s_embedder().embed(entity_subset)\n else:\n all_entities = self.get_s_embedder().embed_all()\n- sp_scores = self._scorer.score_emb(s, p, all_entities, combine=\"sp*\")\n- po_scores = self._scorer.score_emb(o, p_inv, all_entities, combine=\"sp*\")\n+ sp_scores = self._scorer.score_emb(s, p, all_entities, combine=\"sp_\")\n+ po_scores = self._scorer.score_emb(o, p_inv, all_entities, combine=\"sp_\")\n else:\n if entity_subset is not None:\n all_objects = self.get_o_embedder().embed(entity_subset)\n@@ -88,6 +88,6 @@ class ReciprocalRelationsModel(KgeModel):\n else:\n all_objects = self.get_o_embedder().embed_all()\n all_subjects = self.get_s_embedder().embed_all()\n- sp_scores = self._scorer.score_emb(s, p, all_objects, combine=\"sp*\")\n- po_scores = self._scorer.score_emb(o, p_inv, all_subjects, combine=\"sp*\")\n+ sp_scores = self._scorer.score_emb(s, p, all_objects, combine=\"sp_\")\n+ po_scores = self._scorer.score_emb(o, p_inv, all_subjects, combine=\"sp_\")\n return torch.cat((sp_scores, po_scores), dim=1)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/rescal.py",
"new_path": "kge/model/rescal.py",
"diff": "@@ -34,14 +34,14 @@ class RescalScorer(RelationalScorer):\n ).sum(\n dim=-1\n ) # and sum to obtain predictions\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n out = (\n s_emb.unsqueeze(1)\n .bmm(p_mixmat)\n .view(batch_size, entity_size)\n .mm(o_emb.transpose(0, 1))\n )\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n out = (\n p_mixmat.bmm(o_emb.unsqueeze(2))\n .view(batch_size, entity_size)\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/rotate.py",
"new_path": "kge/model/rotate.py",
"diff": "@@ -4,7 +4,7 @@ from kge.model.kge_model import RelationalScorer, KgeModel\n from torch.nn import functional as F\n \n \n-# TODO sp* and *po scoring with RotatE leads to *large* intermediate results. It's\n+# TODO sp_ and _po scoring with RotatE leads to *large* intermediate results. It's\n # unclear whether this can be fixed. Expect out-of-memory errors when using RotatE with\n # 1vsAll or KvsAll training. To do validation/evaluation, you may want to set\n # eval.chunk_size.\n@@ -38,7 +38,7 @@ class RotatEScorer(RelationalScorer):\n \n # now take the norm of the absolute values of the difference vector\n out = torch.norm(diff_abs, dim=1, p=self._norm)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n # as above, but pair each sp-pair with each object\n sp_emb_re, sp_emb_im = hadamard_complex(\n s_emb_re, s_emb_im, p_emb_re, p_emb_im\n@@ -48,7 +48,7 @@ class RotatEScorer(RelationalScorer):\n ) # sp x o x dim\n diff_abs = norm_complex(diff_re, diff_im) # sp x o x dim\n out = torch.norm(diff_abs, dim=2, p=self._norm)\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n # as above, but pair each subject with each po-pair\n sp_emb_re, sp_emb_im = pairwise_hadamard_complex(\n s_emb_re, s_emb_im, p_emb_re, p_emb_im\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/simple.py",
"new_path": "kge/model/simple.py",
"diff": "@@ -22,10 +22,10 @@ class SimplEScorer(RelationalScorer):\n if combine == \"spo\":\n out1 = (s_emb_h * p_emb_forward * o_emb_t).sum(dim=1)\n out2 = (s_emb_t * p_emb_backward * o_emb_h).sum(dim=1)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n out1 = (s_emb_h * p_emb_forward).mm(o_emb_t.transpose(0, 1))\n out2 = (s_emb_t * p_emb_backward).mm(o_emb_h.transpose(0, 1))\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n out1 = (o_emb_t * p_emb_forward).mm(s_emb_h.transpose(0, 1))\n out2 = (o_emb_h * p_emb_backward).mm(s_emb_t.transpose(0, 1))\n else:\n"
},
{
"change_type": "MODIFY",
"old_path": "kge/model/transe.py",
"new_path": "kge/model/transe.py",
"diff": "@@ -15,9 +15,9 @@ class TransEScorer(RelationalScorer):\n n = p_emb.size(0)\n if combine == \"spo\":\n out = -F.pairwise_distance(s_emb + p_emb, o_emb, p=self._norm)\n- elif combine == \"sp*\":\n+ elif combine == \"sp_\":\n out = -torch.cdist(s_emb + p_emb, o_emb, p=self._norm)\n- elif combine == \"*po\":\n+ elif combine == \"_po\":\n out = -torch.cdist(o_emb - p_emb, s_emb, p=self._norm)\n else:\n super().score_emb(s_emb, p_emb, o_emb, combine)\n"
}
] |
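The docstring rewritten in the diff above spells out how the "sp_" combine mode pairs each (s_i, p_i) row with every candidate object. A minimal PyTorch sketch of that expansion, assuming torch is installed; the dot-product scorer below is an invented stand-in for score_emb_spo, and only the repeat_interleave/repeat pattern mirrors the diff:

    import torch

    def score_spo(s, p, o):
        # Hypothetical row-wise triple scorer (placeholder for score_emb_spo).
        return (s * p * o).sum(dim=1)

    def score_sp_(s, p, o_all):
        # Pair every (s_i, p_i) with every candidate object o_j, as in the diff:
        # rows of s/p are repeated consecutively, o_all is tiled n times.
        n, n_o = s.size(0), o_all.size(0)
        s_rep = s.repeat_interleave(n_o, 0)   # (n * n_o, dim)
        p_rep = p.repeat_interleave(n_o, 0)   # (n * n_o, dim)
        o_rep = o_all.repeat((n, 1))          # (n * n_o, dim)
        return score_spo(s_rep, p_rep, o_rep).view(n, n_o)

    s, p = torch.randn(2, 4), torch.randn(2, 4)
    o_all = torch.randn(3, 4)
    print(score_sp_(s, p, o_all).shape)  # torch.Size([2, 3]); entry (i, j) scores (s_i, p_i, o_j)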
846a842a6dbd6c7f989bff5232c697be94ffb7b1 | altair-viz/altair | 07.01.2022 17:33:18 | BSD 3-Clause New or Revised License | Define SelectionPredicateComposition
Made a new class SelectionPredicateComposition to allow use of ~,&,| with certain PredicateCompositions. Rewrote test_filter_transform_selection_predicates to match the new syntax. | [
{
"change_type": "MODIFY",
"old_path": "altair/vegalite/v5/api.py",
"new_path": "altair/vegalite/v5/api.py",
"diff": "@@ -184,7 +184,7 @@ class Parameter(expr.core.OperatorMixin, object):\n \n def __invert__(self):\n if self.param_type == \"selection\":\n- return core.PredicateComposition({\"not\": {\"param\": self.name}})\n+ return SelectionPredicateComposition({\"not\": {\"param\": self.name}})\n else:\n return expr.core.OperatorMixin.__invert__(self)\n \n@@ -192,7 +192,7 @@ class Parameter(expr.core.OperatorMixin, object):\n if self.param_type == \"selection\":\n if isinstance(other, Parameter):\n other = {\"param\": other.name}\n- return core.PredicateComposition({\"and\": [{\"param\": self.name}, other]})\n+ return SelectionPredicateComposition({\"and\": [{\"param\": self.name}, other]})\n else:\n return expr.core.OperatorMixin.__and__(self, other)\n \n@@ -200,7 +200,7 @@ class Parameter(expr.core.OperatorMixin, object):\n if self.param_type == \"selection\":\n if isinstance(other, Parameter):\n other = {\"param\": other.name}\n- return core.PredicateComposition({\"or\": [{\"param\": self.name}, other]})\n+ return SelectionPredicateComposition({\"or\": [{\"param\": self.name}, other]})\n else:\n return expr.core.OperatorMixin.__or__(self, other)\n \n@@ -227,6 +227,18 @@ class Parameter(expr.core.OperatorMixin, object):\n return expr.core.GetAttrExpression(self.name, field_name)\n \n \n+# Enables use of ~, &, | with compositions of selection objects.\n+class SelectionPredicateComposition(core.PredicateComposition):\n+ def __invert__(self):\n+ return SelectionPredicateComposition({\"not\": self.to_dict()})\n+\n+ def __and__(self, other):\n+ return SelectionPredicateComposition({\"and\": [self.to_dict(), other.to_dict()]})\n+\n+ def __or__(self, other):\n+ return SelectionPredicateComposition({\"or\": [self.to_dict(), other.to_dict()]})\n+\n+\n class SelectionExpression(expr.core.OperatorMixin, object):\n def __init__(self, expr):\n self.expr = expr\n"
},
{
"change_type": "MODIFY",
"old_path": "altair/vegalite/v5/tests/test_api.py",
"new_path": "altair/vegalite/v5/tests/test_api.py",
"diff": "@@ -534,34 +534,34 @@ def test_filter_transform_selection_predicates():\n base = alt.Chart(\"data.txt\").mark_point()\n \n chart = base.transform_filter(selector1)\n- assert chart.to_dict()[\"transform\"] == [{\"filter\": {\"selection\": \"s1\"}}]\n+ assert chart.to_dict()[\"transform\"] == [{'filter': {'param': 's1'}}]\n \n chart = base.transform_filter(~selector1)\n- assert chart.to_dict()[\"transform\"] == [{\"filter\": {\"selection\": {\"not\": \"s1\"}}}]\n+ assert chart.to_dict()[\"transform\"] == [{'filter': {'not': {'param': 's1'}}}]\n \n chart = base.transform_filter(selector1 & selector2)\n assert chart.to_dict()[\"transform\"] == [\n- {\"filter\": {\"selection\": {\"and\": [\"s1\", \"s2\"]}}}\n+ {'filter': {'and': [{'param': 's1'}, {'param': 's2'}]}}\n ]\n \n chart = base.transform_filter(selector1 | selector2)\n assert chart.to_dict()[\"transform\"] == [\n- {\"filter\": {\"selection\": {\"or\": [\"s1\", \"s2\"]}}}\n+ {'filter': {'or': [{'param': 's1'}, {'param': 's2'}]}}\n ]\n \n chart = base.transform_filter(selector1 | ~selector2)\n assert chart.to_dict()[\"transform\"] == [\n- {\"filter\": {\"selection\": {\"or\": [\"s1\", {\"not\": \"s2\"}]}}}\n+ {'filter': {'or': [{'param': 's1'}, {'not': {'param': 's2'}}]}}\n ]\n \n chart = base.transform_filter(~selector1 | ~selector2)\n assert chart.to_dict()[\"transform\"] == [\n- {\"filter\": {\"selection\": {\"or\": [{\"not\": \"s1\"}, {\"not\": \"s2\"}]}}}\n+ {'filter': {'or': [{'not': {'param': 's1'}}, {'not': {'param': 's2'}}]}}\n ]\n \n chart = base.transform_filter(~(selector1 & selector2))\n assert chart.to_dict()[\"transform\"] == [\n- {\"filter\": {\"selection\": {\"not\": {\"and\": [\"s1\", \"s2\"]}}}}\n+ {'filter': {'not': {'and': [{'param': 's1'}, {'param': 's2'}]}}}\n ]\n \n \n"
}
] |
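The altair commit above is a small exercise in operator overloading: ~, & and | on the new class simply wrap the operands into nested "not"/"and"/"or" dictionaries. A stripped-down sketch of the same idea, independent of altair's real base classes (the Predicate class here is invented for illustration):

    class Predicate:
        """Minimal stand-in that composes nested predicate dictionaries."""

        def __init__(self, spec):
            self.spec = spec

        def to_dict(self):
            return self.spec

        def __invert__(self):
            return Predicate({"not": self.to_dict()})

        def __and__(self, other):
            return Predicate({"and": [self.to_dict(), other.to_dict()]})

        def __or__(self, other):
            return Predicate({"or": [self.to_dict(), other.to_dict()]})

    s1, s2 = Predicate({"param": "s1"}), Predicate({"param": "s2"})
    print((~(s1 & s2)).to_dict())
    # {'not': {'and': [{'param': 's1'}, {'param': 's2'}]}}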
e5f40d2eb3e6b1fcf7773a13c53e903a7124b090 | choderalab/yank | 18.07.2017 20:13:20 | MIT License | Add MPI parallelization on different MPI communicators.
This is necessary to parallelize functions that are themselves internally parallelized. | [
{
"change_type": "MODIFY",
"old_path": "Yank/mpi.py",
"new_path": "Yank/mpi.py",
"diff": "@@ -44,6 +44,7 @@ import signal\n import logging\n from contextlib import contextmanager\n \n+import numpy as np\n # TODO drop this when we drop Python 2 support\n from openmoltools.utils import wraps_py2\n \n@@ -271,6 +272,13 @@ def distribute(task, distributed_args, *other_args, **kwargs):\n If True, the nodes will be synchronized at the end of the\n execution (i.e. the task will be blocking) even if the\n result is not shared (default is False).\n+ group_jobs : None, int or list of int, optional, default is None\n+ If not None, the `distributed_args` are distributed among groups of\n+ nodes that are isolated from each other. This is particularly useful\n+ if `task` also calls `distribute()`, since normally that would result\n+ in unexpected behavior. If an integer, the nodes are split into equal\n+ groups of `group_jobs` nodes. If a list of integers, the nodes are\n+ split in possibly unequal groups (see example below).\n \n Other Parameters\n ----------------\n@@ -305,9 +313,19 @@ def distribute(task, distributed_args, *other_args, **kwargs):\n >>> distribute(square, [1, 2, 3, 4], send_results_to=0)\n ([1, 4, 9, 16], [0, 1, 2, 3])\n \n+ Divide 3 nodes in two groups of 2 and 1 nodes. The task, in turn,\n+ can distribute another task among the nodes in its own group.\n+\n+ >>> def supertask(list_of_bases):\n+ ... distribute(square, list_of_bases)\n+ >>> list_of_supertask_args = [[1, 2, 3], [4], [5, 6]]\n+ >>> distribute(supertask, distributed_args=list_of_supertask_args,\n+ ... group_nodes=[2, 1])\n+\n \"\"\"\n send_results_to = kwargs.pop('send_results_to', None)\n sync_nodes = kwargs.pop('sync_nodes', False)\n+ group_nodes = kwargs.pop('group_nodes', None)\n mpicomm = get_mpicomm()\n n_jobs = len(distributed_args)\n \n@@ -320,16 +338,61 @@ def distribute(task, distributed_args, *other_args, **kwargs):\n else:\n return all_results, list(range(n_jobs))\n \n- node_job_ids = range(mpicomm.rank, n_jobs, mpicomm.size)\n+ # Determine the jobs that this node has to run.\n+ # If we need to group nodes, split the default mpicomm.\n+ if group_nodes is not None:\n+ # We don't support returning results.\n+ if send_results_to is not None:\n+ raise ValueError('Cannot return the result of the distributed '\n+ 'task if nodes are divided into groups.')\n+\n+ # Store original mpicomm that we'll have to restore later.\n+ original_mpicomm = mpicomm\n+\n+ # Determine the color of this node.\n+ try: # Check if this is an integer.\n+ color = int(mpicomm.rank / group_nodes)\n+ n_groups = int(np.ceil(mpicomm.size / group_nodes))\n+ except TypeError: # List of integers.\n+ # Check that the group division requested make sense.\n+ cumulative_sum_nodes = np.cumsum(group_nodes)\n+ if cumulative_sum_nodes[-1] != mpicomm.size:\n+ raise ValueError('The group division requested cannot be performed.\\n'\n+ 'Total number of nodes: {}\\n'\n+ 'Group nodes: {}'.format(mpicomm.size, group_nodes))\n+ # The first group_nodes[0] nodes have color 0, the next group_nodes[1] nodes\n+ # have color 1 etc.\n+ color = next(i for i, v in enumerate(cumulative_sum_nodes) if v > mpicomm.rank)\n+ n_groups = len(group_nodes)\n+\n+ # Split the mpicomm among nodes. 
Maintain same order using mpicomm.rank as rank.\n+ mpicomm = original_mpicomm.Split(color=color, key=mpicomm.rank)\n+\n+ # Cache new mpicomm so that task() will access the split mpicomm.\n+ get_mpicomm._mpicomm = mpicomm\n+\n+ # Distribute distributed_args by color.\n+ node_job_ids = range(color, n_jobs, n_groups)\n+ node_name = 'Group {}/{}, Node {}/{}'.format(color+1, n_groups,\n+ mpicomm.rank+1, mpicomm.size)\n+ else:\n+ # Distribute distributed_args by mpicomm.rank.\n+ node_job_ids = range(mpicomm.rank, n_jobs, mpicomm.size)\n+ node_name = 'Node {}/{}'.format(mpicomm.rank+1, mpicomm.size)\n \n # Compute all the results assigned to this node.\n results = []\n- node_name = 'Node {}/{}'.format(mpicomm.rank+1, mpicomm.size)\n for job_id in node_job_ids:\n distributed_arg = distributed_args[job_id]\n logger.debug('{}: execute {}({})'.format(node_name, task.__name__, distributed_arg))\n results.append(task(distributed_arg, *other_args, **kwargs))\n \n+ # Restore the original mpicomm.\n+ if group_nodes is not None:\n+ mpicomm.Free()\n+ mpicomm = original_mpicomm\n+ get_mpicomm._mpicomm = original_mpicomm\n+\n # Share result as specified.\n if send_results_to == 'all':\n logger.debug('{}: allgather results of {}'.format(node_name, task.__name__))\n"
},
{
"change_type": "MODIFY",
"old_path": "Yank/tests/test_mpi.py",
"new_path": "Yank/tests/test_mpi.py",
"diff": "@@ -13,8 +13,12 @@ Test MPI utility functions in mpi.py.\n # GLOBAL IMPORTS\n # =============================================================================\n \n-import numpy as np\n+import json\n+import shutil\n+import contextlib\n+\n from simtk import unit\n+from openmoltools.utils import temporary_cd\n \n from yank.mpi import *\n \n@@ -201,3 +205,72 @@ def test_distribute():\n \n result = distribute(task, distributed_args, send_results_to=None)\n assert_is_equal(result, (partial_expected_results, partial_job_indices))\n+\n+\n+def test_distribute_groups():\n+ \"\"\"Test distribute jobs among groups of nodes.\"\"\"\n+ # Configuration.\n+ group_nodes = 2\n+ temp_folder = 'temp_test_mpi_test_distribute_groups'\n+\n+ @contextlib.contextmanager\n+ def enter_temp_directory():\n+ run_single_node(0, os.makedirs, temp_folder, sync_nodes=True)\n+ try:\n+ with temporary_cd(temp_folder):\n+ yield\n+ finally:\n+ run_single_node(0, shutil.rmtree, temp_folder)\n+\n+\n+ def store_data(file_name, data):\n+ with open(file_name, 'w') as f:\n+ json.dump(data, f)\n+\n+ def supertask(list_of_bases):\n+ \"\"\"Compute square of bases and store results\"\"\"\n+ squared_values = distribute(square, list_of_bases, send_results_to='all')\n+ mpicomm = get_mpicomm()\n+ if mpicomm is None:\n+ mpi_size = 0\n+ else:\n+ mpi_size = mpicomm.size\n+ file_name = 'file_len{}.dat'.format(len(list_of_bases))\n+ run_single_node(0, store_data, file_name, (squared_values, mpi_size))\n+\n+ def verify_task(list_of_supertask_args):\n+ mpicomm = get_mpicomm()\n+ n_jobs = len(list_of_supertask_args)\n+\n+ # Find the job_ids assigned to the last group and the size of its communicator.\n+ if mpicomm is not None:\n+ n_groups = int(np.ceil(mpicomm.size / group_nodes))\n+ last_group_size = group_nodes - mpicomm.size % group_nodes\n+ last_group_job_ids = set(range(n_groups-1, n_jobs, n_groups))\n+\n+ # Verify all tasks.\n+ for supertask_args_idx, supertask_args in enumerate(list_of_supertask_args):\n+ file_name = 'file_len{}.dat'.format(len(supertask_args))\n+ with open(file_name, 'r') as f:\n+ squared_values, mpi_size = json.load(f)\n+\n+ # Check that result is correct.\n+ assert len(supertask_args) == len(squared_values)\n+ for idx, value in enumerate(squared_values):\n+ assert value == supertask_args[idx]**2\n+\n+ # Check that the correct group executed this task.\n+ if mpicomm is None:\n+ expected_mpi_size = 0\n+ elif supertask_args_idx in last_group_job_ids:\n+ expected_mpi_size = last_group_size\n+ else:\n+ expected_mpi_size = 2\n+ assert mpi_size == expected_mpi_size\n+\n+ # Super tasks will store results in the same temporary directory.\n+ with enter_temp_directory():\n+ list_of_supertask_args = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]\n+ distribute(supertask, distributed_args=list_of_supertask_args, sync_nodes=True,\n+ group_nodes=group_nodes)\n+ run_single_node(0, verify_task, list_of_supertask_args)\n"
}
] |
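The heart of the mpi.py change above is splitting the world communicator by a "color" so that each group of nodes can run its own nested distribute() without interfering with the others. A minimal mpi4py sketch of that split, assuming mpi4py is installed and the script is launched under mpiexec; the group size of 2 is only an example:

    # Run with e.g.: mpiexec -n 4 python split_demo.py
    import numpy as np
    from mpi4py import MPI

    world = MPI.COMM_WORLD
    group_nodes = 2                              # example group size

    # Ranks 0-1 get color 0, ranks 2-3 get color 1, and so on.
    color = world.rank // group_nodes
    n_groups = int(np.ceil(world.size / group_nodes))

    # Split the world communicator; key=world.rank keeps the original ordering.
    subcomm = world.Split(color=color, key=world.rank)
    print("world rank", world.rank, "-> group", color + 1, "of", n_groups,
          "local rank", subcomm.rank, "of", subcomm.size)

    subcomm.Free()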
a4565ef863c2bdf5acbcb216681b3f2ee9ca9918 | choderalab/yank | 24.07.2017 13:57:08 | MIT License | Add jobid and njobs argument to script command.
This allows running multiple experiments from the same script in several parallel executions of YANK. | [
{
"change_type": "MODIFY",
"old_path": "Yank/commands/script.py",
"new_path": "Yank/commands/script.py",
"diff": "@@ -25,7 +25,7 @@ usage = \"\"\"\n YANK script\n \n Usage:\n- yank script (-y FILEPATH | --yaml=FILEPATH) [-o OVERRIDE]...\n+ yank script (-y FILEPATH | --yaml=FILEPATH) [--jobid=INTEGER] [--njobs=INTEGER] [-o OVERRIDE]...\n \n Description:\n Set up and run free energy calculations from a YAML script. All options can be specified in the YAML script.\n@@ -34,7 +34,11 @@ Required Arguments:\n -y, --yaml=FILEPATH Path to the YAML script specifying options and/or how to set up and run the experiment.\n \n Optional Arguments:\n-\n+ --jobid=INTEGER You can run only a subset of the experiments by specifying jobid and njobs, where\n+ 0 <= job_id <= n_jobs-1. In this case, njobs must be specified as well and YANK will\n+ run only 1/n_jobs of the experiments. This can be used to run several separate YANK\n+ executions in parallel starting from the same script.\n+ --njobs=INTEGER Specify the total number of parallel executions. jobid has to be specified too.\n -o, --override=OVERRIDE Override a single option in the script file. May be specified multiple times.\n Specified as a nested dictionary of the form:\n top_option:sub_option:value\n@@ -88,13 +92,22 @@ def dispatch(args):\n # This is done to avoid input type ambiguity and instead let the parser handle it as though it were a file\n override = str(override_dict).replace(\"'\", \"\").replace('\"', '')\n \n+ if args['--jobid']:\n+ job_id = args['--jobid']\n+ else:\n+ job_id = None\n+ if args['--njobs']:\n+ n_jobs = args['--njobs']\n+ else:\n+ n_jobs = None\n+\n if args['--yaml']:\n yaml_path = args['--yaml']\n \n if not os.path.isfile(yaml_path):\n raise ValueError('Cannot find YAML script \"{}\"'.format(yaml_path))\n \n- yaml_builder = ExperimentBuilder(yaml_source=yaml_path)\n+ yaml_builder = ExperimentBuilder(script=yaml_path, job_id=job_id, n_jobs=n_jobs)\n if override: # Parse the string present.\n yaml_builder.update_yaml(override)\n yaml_builder.run_experiments()\n"
},
{
"change_type": "MODIFY",
"old_path": "Yank/experiment.py",
"new_path": "Yank/experiment.py",
"diff": "@@ -375,7 +375,7 @@ class ExperimentBuilder(object):\n 'mc_displacement_sigma': 10.0 * unit.angstroms\n }\n \n- def __init__(self, script=None):\n+ def __init__(self, script=None, job_id=None, n_jobs=None):\n \"\"\"Constructor.\n \n Parameters\n@@ -383,8 +383,25 @@ class ExperimentBuilder(object):\n script : str or dict\n A path to the YAML script or the YAML content. If not specified, you\n can load it later by using parse() (default is None).\n+ job_id : None or int\n+ If you want to split the experiments among different executions,\n+ you can set this to an integer 0 <= job_id <= n_jobs-1, and this\n+ ExperimentBuilder will run only 1/n_jobs of the experiments.\n+ n_jobs : None or int\n+ If job_id is specified, this is the total number of jobs that\n+ you are running in parallel from your script.\n \n \"\"\"\n+ # Check consistency job_id and n_jobs.\n+ if job_id is not None:\n+ if n_jobs is None:\n+ raise ValueError('n_jobs must be specified together with job_id')\n+ if not 0 <= job_id <= n_jobs:\n+ raise ValueError('job_id must be between 0 and n_jobs ({})'.format(n_jobs))\n+\n+ self._job_id = job_id\n+ self._n_jobs = n_jobs\n+\n self._options = self.GENERAL_DEFAULT_OPTIONS.copy()\n self._options.update(self.EXPERIMENT_DEFAULT_OPTIONS.copy())\n \n@@ -530,7 +547,7 @@ class ExperimentBuilder(object):\n # Cycle between experiments every switch_experiment_interval iterations\n # until all of them are done. We don't know how many experiments\n # there are until after the end of first for-loop.\n- completed = [False] # There always be at least one experiment.\n+ completed = [False] # There is always at least one experiment.\n while not all(completed):\n for experiment_index, experiment in enumerate(self._build_experiments()):\n \n@@ -747,7 +764,8 @@ class ExperimentBuilder(object):\n def _expand_experiments(self):\n \"\"\"Generates all possible combinations of experiment.\n \n- Each generated experiment is uniquely named.\n+ Each generated experiment is uniquely named. 
If job_id and n_jobs are\n+ set, this returns only the experiments assigned to this particular job.\n \n Returns\n -------\n@@ -758,6 +776,11 @@ class ExperimentBuilder(object):\n The dictionary describing a single experiment.\n \n \"\"\"\n+ # We need to distribute experiments among jobs, but different\n+ # experiments sectiona may have a different number of combinations,\n+ # so we need to count them.\n+ experiment_id = 0\n+\n output_dir = ''\n for exp_name, experiment in utils.dictiter(self._experiments):\n if len(self._experiments) > 1:\n@@ -765,7 +788,9 @@ class ExperimentBuilder(object):\n \n # Loop over all combinations\n for name, combination in experiment.named_combinations(separator='_', max_name_length=50):\n- yield os.path.join(output_dir, name), combination\n+ if self._job_id is None or experiment_id % self._n_jobs == self._job_id:\n+ yield os.path.join(output_dir, name), combination\n+ experiment_id += 1\n \n # --------------------------------------------------------------------------\n # Parsing and syntax validation\n@@ -1203,15 +1228,19 @@ class ExperimentBuilder(object):\n def validate_experiment_options(options):\n return ExperimentBuilder._validate_options(options, validate_general_options=False)\n \n- # Check if there is a sequence of experiments or a single one\n+ # Check if there is a sequence of experiments or a single one.\n+ # We need to have a deterministic order of experiments so that\n+ # if we run multiple experiments in parallel, we won't have\n+ # multiple processes running the same one.\n try:\n if isinstance(yaml_content['experiments'], list):\n- self._experiments = {exp_name: utils.CombinatorialTree(yaml_content[exp_name])\n- for exp_name in yaml_content['experiments']}\n+ combinatorial_trees = [(exp_name, utils.CombinatorialTree(yaml_content[exp_name]))\n+ for exp_name in yaml_content['experiments']]\n else:\n- self._experiments = {'experiments': utils.CombinatorialTree(yaml_content['experiments'])}\n+ combinatorial_trees = [('experiments', utils.CombinatorialTree(yaml_content['experiments']))]\n+ self._experiments = collections.OrderedDict(combinatorial_trees)\n except KeyError:\n- self._experiments = {}\n+ self._experiments = collections.OrderedDict()\n return\n \n # Restraint schema contains type and optional parameters.\n@@ -1830,7 +1859,7 @@ class ExperimentBuilder(object):\n os.makedirs(directory)\n \n def _build_experiment(self, experiment, experiment_path):\n- \"\"\"Prepare and run a single experiment.\n+ \"\"\"Prepare a single experiment.\n \n Parameters\n ----------\n"
}
] |
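Stripped of the YAML machinery, the job splitting above is a round-robin partition: experiment i belongs to the job where i % n_jobs == job_id, so separate executions started from the same script never pick the same experiment. A tiny standalone sketch (the experiment names are made up):

    def assigned_experiments(experiments, job_id=None, n_jobs=None):
        """Yield only the experiments assigned to this job (round-robin by index)."""
        for experiment_id, experiment in enumerate(experiments):
            if job_id is None or experiment_id % n_jobs == job_id:
                yield experiment

    all_experiments = ["expA", "expB", "expC", "expD", "expE"]
    print(list(assigned_experiments(all_experiments, job_id=0, n_jobs=2)))  # ['expA', 'expC', 'expE']
    print(list(assigned_experiments(all_experiments, job_id=1, n_jobs=2)))  # ['expB', 'expD']

This is also why the diff switches the experiments to an OrderedDict: every job must iterate the experiments in the same order so that the index each experiment gets is consistent across parallel executions.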
926d3e6dbe16c21403a5f4fa83f066ecdff40c67 | choderalab/yank | 07.08.2018 11:51:41 | MIT License | Move CLI imports into functions for speed
To keep the CLI from taking a long time to start because it imports everything, especially when the user mis-types a command for docopt to handle, the import statements for the CLI functions have been moved into the functions themselves instead of the module header. | [
{
"change_type": "MODIFY",
"old_path": "Yank/commands/analyze.py",
"new_path": "Yank/commands/analyze.py",
"diff": "@@ -13,15 +13,7 @@ Analyze YANK output file.\n # MODULE IMPORTS\n # =============================================================================================\n \n-import io\n-import re\n-import os\n-import pickle\n-\n-from simtk import unit\n-\n-import pkg_resources\n-from .. import utils, analyze, mpi\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -109,6 +101,11 @@ General Options:\n \n \n def dispatch(args):\n+\n+ import os\n+ import pickle\n+ from .. import utils, analyze, mpi\n+\n utils.config_root_logger(args['--verbose'])\n \n if args['report']:\n@@ -154,6 +151,9 @@ def dispatch(args):\n \n \n def extract_analyzer_kwargs(args, quantities_as_strings=False):\n+\n+ import simtk.unit as unit\n+\n \"\"\"Return a dictionary with the keyword arguments to pass to the analyzer.\"\"\"\n analyzer_kwargs = {}\n if args['--skipunbiasing']:\n@@ -172,6 +172,10 @@ def extract_analyzer_kwargs(args, quantities_as_strings=False):\n \n \n def dispatch_extract_trajectory(args):\n+\n+ import os\n+ from .. import analyze\n+\n # Paths\n output_path = args['--trajectory']\n nc_path = args['--netcdf']\n@@ -214,6 +218,12 @@ def dispatch_extract_trajectory(args):\n \n def dispatch_report(args):\n \n+ import io\n+ import os\n+ import re\n+ import pkg_resources\n+ from .. import analyze\n+\n # Check modules for render\n store = args['--store']\n yaml_input = args['--yaml']\n"
},
{
"change_type": "MODIFY",
"old_path": "Yank/commands/script.py",
"new_path": "Yank/commands/script.py",
"diff": "@@ -13,9 +13,7 @@ Set up and run YANK calculation from script.\n # GLOBAL IMPORTS\n # =============================================================================================\n \n-import os\n-from ..experiment import ExperimentBuilder\n-\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -67,6 +65,10 @@ def dispatch(args):\n Command-line arguments from docopt.\n \n \"\"\"\n+\n+ import os\n+ from ..experiment import ExperimentBuilder\n+\n override = None\n if args['--override']: # Is False for None and [] (empty list)\n over_opts = args['--override']\n"
},
{
"change_type": "MODIFY",
"old_path": "Yank/commands/selftest.py",
"new_path": "Yank/commands/selftest.py",
"diff": "@@ -13,15 +13,7 @@ Run YANK self tests after installation.\n # MODULE IMPORTS\n # =============================================================================================\n \n-import doctest\n-import pkgutil\n-import subprocess\n-import re\n-\n-from .. import version\n-from . import platforms\n-import simtk.openmm as mm\n-\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -48,6 +40,7 @@ General Options:\n # COMMAND DISPATCH\n # =============================================================================================\n \n+\n class LicenseError(Exception):\n \"\"\"Error raised by a missing License.\"\"\"\n pass\n@@ -55,6 +48,14 @@ class LicenseError(Exception):\n \n def dispatch(args):\n \n+ import re\n+ import doctest\n+ import pkgutil\n+ import subprocess\n+ import simtk.openmm as mm\n+ from .. import version\n+ from . import platforms\n+\n # Determine verbosity in advance\n # TODO: Figure out how to get -v back in to command and allow -vv and -vvv\n # nosetests: -v == --verbosity=2\n"
},
{
"change_type": "MODIFY",
"old_path": "Yank/commands/status.py",
"new_path": "Yank/commands/status.py",
"diff": "@@ -13,11 +13,7 @@ Query output files for quick status.\n # MODULE IMPORTS\n # =============================================================================================\n \n-import operator\n-import itertools\n-import collections\n-\n-from .. import experiment\n+# Module imports handled in individual functions since CLI should be faster to boot up\n \n # =============================================================================================\n # COMMAND-LINE INTERFACE\n@@ -62,8 +58,12 @@ def find_contiguous_ids(job_ids):\n The job ids organized in contiguous sets.\n \n \"\"\"\n+\n+ import operator\n+ import itertools\n+\n contiguous_job_ids = []\n- for k, g in itertools.groupby(enumerate(job_ids), lambda x:x[0]-x[1]):\n+ for k, g in itertools.groupby(enumerate(job_ids), lambda x: x[0]-x[1]):\n group = list(map(operator.itemgetter(1), g))\n if len(group) == 1:\n contiguous_job_ids.append(str(group[0]))\n@@ -73,6 +73,10 @@ def find_contiguous_ids(job_ids):\n \n \n def dispatch(args):\n+\n+ import collections\n+ from .. import experiment\n+\n # Handle optional arguments.\n if args['--njobs']:\n n_jobs = int(args['--njobs'])\n"
}
] |
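The speed-up in the yank commit above comes from deferring heavy imports until a subcommand actually dispatches, so that --help or a mistyped command never pays the import cost. A generic sketch of the before/after pattern; the module names are placeholders, not yank's real dependencies:

    # Slow: the heavy dependency is imported even for `mytool --help`.
    #
    #     import heavy_scientific_package   # hypothetical, expensive to import
    #
    #     def dispatch(args):
    #         heavy_scientific_package.run(args)

    # Fast: the import cost is only paid when the subcommand really runs.
    def dispatch(args):
        import json                        # stand-in for an expensive import
        print(json.dumps({"args": args}))

    if __name__ == "__main__":
        dispatch(["--yaml", "script.yaml"])

The trade-off is a tiny repeated lookup on every call (Python caches modules in sys.modules, so re-imports are cheap) in exchange for a much faster interpreter start-up.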
b0a8c1dc01bd027cc4b1efd12a7cba9a2b7416a9 | lagolunatic/wwrando | 17.12.2022 20:00:12 | MIT License | Improve performance of path hint generation
Avoid instantiating tons of different logic instances and just reuse the same instance by resetting its state every time it's needed. | [
{
"change_type": "MODIFY",
"old_path": "hints.py",
"new_path": "hints.py",
"diff": "@@ -64,6 +64,9 @@ class Hints:\n self.logic = rando.logic\n self.options = rando.options\n \n+ self.path_logic = Logic(self.rando)\n+ self.path_logic_initial_state = self.path_logic.save_simulated_playthrough_state()\n+ \n # Define instance variable shortcuts for hint distribution options.\n self.max_path_hints = int(self.options.get(\"num_path_hints\", 0))\n self.max_barren_hints = int(self.options.get(\"num_barren_hints\", 0))\n@@ -193,27 +196,29 @@ class Hints:\n return chart_name_to_sunken_treasure\n \n def check_location_required_for_paths(self, location_to_check, paths_to_check):\n+ # To check whether the location is required or not, we simulate a playthrough and remove the item the player would\n+ # receive at that location immediately after they receive it. If the player can still fulfill the requirement \n+ # despite not having this item, the location is not required.\n+ \n # If the item is not a progress item, there's no way it's required.\n item_name = self.logic.done_item_locations[location_to_check]\n if item_name not in self.logic.all_progress_items:\n return False\n \n- # Effectively, to check whether the location is required or not, we simulate a playthrough and remove the item the\n- # player would receive at that location immediately after they receive it. If the player can still fulfill the\n- # requirement despite not having this item, the location is not required.\n- logic = Logic(self.rando)\n+ # Reuse a single Logic instance over multiple calls to this function for performance reasons.\n+ self.path_logic.load_simulated_playthrough_state(self.path_logic_initial_state)\n previously_accessible_locations = []\n \n- while logic.unplaced_progress_items:\n+ while self.path_logic.unplaced_progress_items:\n progress_items_in_this_sphere = OrderedDict()\n \n- accessible_locations = logic.get_accessible_remaining_locations()\n+ accessible_locations = self.path_logic.get_accessible_remaining_locations()\n locations_in_this_sphere = [\n loc for loc in accessible_locations\n if loc not in previously_accessible_locations\n ]\n if not locations_in_this_sphere:\n- return {path_name: not logic.check_requirement_met(self.DUNGEON_NAME_TO_REQUIREMENT_NAME[path_name]) for path_name in paths_to_check}\n+ break\n \n \n if not self.options.get(\"keylunacy\"):\n@@ -231,36 +236,40 @@ class Hints:\n item_name = self.logic.prerandomization_item_locations[small_key_location_name]\n assert item_name.endswith(\" Small Key\")\n \n- logic.add_owned_item(item_name)\n+ self.path_logic.add_owned_item(item_name)\n # Remove small key from owned items if it was from the location we want to check\n if small_key_location_name == location_to_check:\n- logic.currently_owned_items.remove(logic.clean_item_name(item_name))\n+ self.path_logic.currently_owned_items.remove(self.path_logic.clean_item_name(item_name))\n \n previously_accessible_locations += newly_accessible_small_key_locations\n continue # Redo this loop iteration with the small key locations no longer being considered 'remaining'.\n \n \n # Hide duplicated progression items (e.g. 
Empty Bottles) when they are placed in non-progression locations to avoid confusion and inconsistency.\n- locations_in_this_sphere = logic.filter_locations_for_progression(locations_in_this_sphere)\n+ locations_in_this_sphere = self.path_logic.filter_locations_for_progression(locations_in_this_sphere)\n \n for location_name in locations_in_this_sphere:\n item_name = self.logic.done_item_locations[location_name]\n- if item_name in logic.all_progress_items:\n+ if item_name in self.path_logic.all_progress_items:\n progress_items_in_this_sphere[location_name] = item_name\n \n for location_name, item_name in progress_items_in_this_sphere.items():\n- logic.add_owned_item(item_name)\n+ self.path_logic.add_owned_item(item_name)\n # Remove item from owned items if it was from the location we want to check.\n if location_name == location_to_check:\n- logic.currently_owned_items.remove(logic.clean_item_name(item_name))\n- for group_name, item_names in logic.progress_item_groups.items():\n- entire_group_is_owned = all(item_name in logic.currently_owned_items for item_name in item_names)\n- if entire_group_is_owned and group_name in logic.unplaced_progress_items:\n- logic.unplaced_progress_items.remove(group_name)\n+ self.path_logic.currently_owned_items.remove(self.path_logic.clean_item_name(item_name))\n+ for group_name, item_names in self.path_logic.progress_item_groups.items():\n+ entire_group_is_owned = all(item_name in self.path_logic.currently_owned_items for item_name in item_names)\n+ if entire_group_is_owned and group_name in self.path_logic.unplaced_progress_items:\n+ self.path_logic.unplaced_progress_items.remove(group_name)\n \n previously_accessible_locations = accessible_locations\n \n- return {path_name: not logic.check_requirement_met(self.DUNGEON_NAME_TO_REQUIREMENT_NAME[path_name]) for path_name in paths_to_check}\n+ requirements_met = {\n+ path_name: not self.path_logic.check_requirement_met(self.DUNGEON_NAME_TO_REQUIREMENT_NAME[path_name])\n+ for path_name in paths_to_check\n+ }\n+ return requirements_met\n \n def get_required_locations_for_paths(self):\n # Add all race-mode dungeons as paths, in addition to Hyrule and Ganon's Tower.\n"
},
{
"change_type": "MODIFY",
"old_path": "logic/logic.py",
"new_path": "logic/logic.py",
"diff": "@@ -174,6 +174,22 @@ class Logic:\n self.requirement_met_cache.clear()\n self.cached_enemies_tested_for_reqs_tuple = OrderedDict()\n \n+ def save_simulated_playthrough_state(self):\n+ vars_backup = {}\n+ for attr_name in [\n+ \"currently_owned_items\",\n+ \"unplaced_progress_items\",\n+ \"unplaced_nonprogress_items\",\n+ \"unplaced_fixed_consumable_items\",\n+ \"requirement_met_cache\",\n+ ]:\n+ vars_backup[attr_name] = getattr(self, attr_name).copy()\n+ return vars_backup\n+ \n+ def load_simulated_playthrough_state(self, vars_backup):\n+ for attr_name, value in vars_backup.items():\n+ setattr(self, attr_name, value.copy())\n+ \n def is_dungeon_or_cave(self, location_name):\n # Look up the setting that the location name is under\n is_dungeon = \"Dungeon\" in self.item_locations[location_name][\"Types\"]\n"
}
] |
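The hints.py change above replaces "build a fresh Logic object for every location check" with "snapshot the mutable attributes once and restore them before each simulated playthrough". A generic sketch of that save/restore pattern; the Simulator class and its attributes are invented for illustration:

    class Simulator:
        """Toy stand-in for an object whose simulated runs mutate a few containers."""

        def __init__(self):
            self.owned_items = set()
            self.remaining_items = ["sword", "bow", "bomb"]

        def save_state(self):
            # Shallow copies suffice here because the containers hold immutable entries.
            return {name: getattr(self, name).copy()
                    for name in ("owned_items", "remaining_items")}

        def load_state(self, backup):
            for name, value in backup.items():
                setattr(self, name, value.copy())

    sim = Simulator()
    initial = sim.save_state()
    for _ in range(3):                  # reuse one instance instead of rebuilding it
        sim.load_state(initial)
        sim.owned_items.add(sim.remaining_items.pop())
    print(sim.owned_items, sim.remaining_items)  # {'bomb'} ['sword', 'bow']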
cf0d18e6334193e198d9eb105eb775635198129b | genericmappingtools/pygmt | 05.07.2017 10:44:59 | BSD 3-Clause New or Revised License | Create and destroy C sessions inside call_module
Removes the need to always do this when using the C API. It's what the
command line app does anyway so it's not wasteful. | [
{
"change_type": "MODIFY",
"old_path": "gmt/clib/functions.py",
"new_path": "gmt/clib/functions.py",
"diff": "@@ -63,20 +63,21 @@ def destroy_session(session):\n assert status == 0, 'Failed with status code {}.'.format(status)\n \n \n-def call_module(session, module, args):\n+def call_module(module, args):\n \"\"\"\n Call a GMT module with the given arguments.\n \n Makes a call to ``GMT_Call_Module`` from the C API using mode\n- \"GMT_MODULE_CMD\" (arguments passed as a single string).\n+ ``GMT_MODULE_CMD`` (arguments passed as a single string).\n \n Most interactions with the C API are done through this function.\n \n+ Creates a new C API session (:func:`gmt.clib.create_session`) to pass to\n+ ``GMT_Call_Module`` and destroys it (:func:`gmt.clib.destroy_session`)\n+ after it is used. This is what the command-line interface of GMT does.\n+\n Parameters\n ----------\n- session : ctypes.c_void_p\n- A void pointer to a GMTAPI_CTRL structure created by\n- :func:`gmt.clib.create_session`.\n module : str\n Module name (``'pscoast'``, ``'psbasemap'``, etc).\n args : str\n@@ -90,6 +91,8 @@ def call_module(session, module, args):\n c_call_module.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_int,\n ctypes.c_void_p]\n c_call_module.restype = ctypes.c_int\n+ session = create_session()\n status = c_call_module(session, module.encode(), mode, args.encode())\n+ destroy_session(session)\n assert status is not None, 'Failed returning None.'\n assert status == 0, 'Failed with status code {}.'.format(status)\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/session_management.py",
"new_path": "gmt/session_management.py",
"diff": "@@ -27,8 +27,7 @@ def begin(prefix='gmtsession', fmt='pdf'):\n * ps: PostScript.\n * tif: Tagged Image Format File.\n \"\"\"\n- session = clib.create_session()\n- clib.call_module(session, 'begin', '{} {}'.format(prefix, fmt))\n+ clib.call_module('begin', '{} {}'.format(prefix, fmt))\n \n \n def end():\n@@ -41,8 +40,7 @@ def end():\n ``gmt.begin``), and bring the figures to the working directory.\n \n \"\"\"\n- session = clib.create_session()\n- clib.call_module(session, 'end', '')\n+ clib.call_module('end', '')\n \n \n # Not working yet (perhaps bug in GMT).\n@@ -77,6 +75,5 @@ def figure(prefix, formats='pdf', convertoptions='A,P'):\n ``'A[<args>],C<args>,D<dir>,E<dpi>,P,Q<args>,S'``.\n \n \"\"\"\n- session = clib.create_session()\n args = '{} {} {}'.format(prefix, formats, convertoptions)\n- clib.call_module(session, 'figure', args)\n+ clib.call_module('figure', args)\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/tests/test_clib.py",
"new_path": "gmt/tests/test_clib.py",
"diff": "@@ -27,9 +27,7 @@ def test_call_module():\n \"Run a psbasemap call to see if the module works\"\n module = 'psbasemap'\n args = '-R10/70/-3/8 -JX4i/3i -Ba -P ->tmp.ps'\n- session = create_session()\n- call_module(session, module, args)\n- destroy_session(session)\n+ call_module(module, args)\n assert os.path.exists('tmp.ps')\n os.remove('tmp.ps')\n # Not the most ideal test. Just check if no segfaults or exceptions occur.\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/tests/test_session_management.py",
"new_path": "gmt/tests/test_session_management.py",
"diff": "@@ -4,7 +4,7 @@ Test the session management modules.\n import os\n \n from .. import begin, end, figure\n-from ..clib import call_module, create_session\n+from ..clib import call_module\n from .utils import figure_comparison_test\n \n \n@@ -15,8 +15,7 @@ TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')\n def test_session(prefix, fmt):\n \"Run a command inside a begin-end modern mode block.\"\n begin(prefix=prefix, fmt=fmt)\n- session = create_session()\n- call_module(session, 'psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n+ call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n \n \n@@ -25,9 +24,8 @@ def test_session_figure(prefix, fmt):\n \"Run a figure command inside a begin-end modern mode block.\"\n begin()\n figure(prefix=prefix, formats=fmt)\n- session = create_session()\n- call_module(session, 'psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n+ call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n # Plot some points with red circles\n data_file = os.path.join(TEST_DATA_DIR, 'points.txt')\n- call_module(session, 'psxy', '-<{} -Sc -Gred'.format(data_file))\n+ call_module('psxy', '-<{} -Sc -Gred'.format(data_file))\n end()\n"
}
] |
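The pygmt change above folds the create/use/destroy session dance into call_module itself, so callers never touch the C session pointer. The same acquire-use-release shape in plain Python, with dummy functions standing in for the real ctypes wrappers (none of the names below are the actual GMT C API, and the try/finally is an extra safety touch not present in the original diff):

    def create_session():
        # Stand-in for the GMT_Create_Session ctypes wrapper.
        print("session created")
        return object()

    def destroy_session(session):
        # Stand-in for the GMT_Destroy_Session ctypes wrapper.
        print("session destroyed")

    def call_module(module, args):
        """Create a session, run one module call, and always clean it up."""
        session = create_session()
        try:
            print("calling:", module, args)
        finally:
            destroy_session(session)

    call_module("psbasemap", "-R10/70/-3/8 -JX4i/3i -Ba -P")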
15eeaa5a0080ca063d421feb0d64e94947c478b6 | genericmappingtools/pygmt | 05.07.2017 17:08:25 | BSD 3-Clause New or Revised License | Hide away session management
Implement a GMTSession class to handle calling begin and end. Use it as
a global (package level) session. This way, each script/notebook uses
a single session. begin and end are no longer public API functions. | [
{
"change_type": "MODIFY",
"old_path": "doc/api.rst",
"new_path": "doc/api.rst",
"diff": "@@ -6,15 +6,13 @@ API Reference\n High-level functions for GMT modules\n ------------------------------------\n \n-Each GMT module (``gmt pscoas``, ``gmt grdgradient``, etc.) is wrapped by a\n+Each GMT module (``gmt pscoast``, ``gmt psbasemap``, etc.) is wrapped by a\n function in the ``gmt`` top-level module.\n \n .. autosummary::\n :toctree: api/\n :template: function.rst\n \n- gmt.begin\n- gmt.end\n gmt.figure\n gmt.pscoast\n \n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/__init__.py",
"new_path": "gmt/__init__.py",
"diff": "@@ -5,12 +5,20 @@ from ._version import get_versions\n \n # Import modules to make the high-level GMT Python API\n from .ps_modules import pscoast\n-from .session_management import begin, end, figure\n+from .session_management import figure, GMTSession\n \n \n+# Get the version number through versioneer\n __version__ = get_versions()['version']\n+# Delete the function so that it doesn't appear in the public API\n del get_versions\n \n+# Start our global modern mode session. It calls \"gmt.begin\" when started and\n+# \"gmt.end\" when deleted.\n+_GLOBAL_SESSION = GMTSession()\n+# Delete the class so that it doesn't appear in the public API\n+del GMTSession\n+\n \n def test(doctest=True, verbose=True, coverage=False, figures=True):\n \"\"\"\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/session_management.py",
"new_path": "gmt/session_management.py",
"diff": "@@ -46,3 +46,51 @@ def figure():\n # Passing format '-' tells gmt.end to not produce any files.\n fmt = '-'\n clib.call_module('figure', '{} {}'.format(prefix, fmt))\n+\n+\n+class GMTSession():\n+ \"\"\"\n+ Placeholder for an active modern mode session.\n+\n+ Calls ``begin`` and ``figure`` when created. Calls ``end`` when destroyed\n+ so that the tmp files are cleaned.\n+\n+ The call to ``figure`` is necessary because the default behavior in Python\n+ is to not generate figure files unless explicitly commanded by\n+ ``psconvert`` or ``savefig``. The call starts a new figure with the format\n+ ``-`` which indicates that ``end`` should skip processing that figure.\n+\n+ \"\"\"\n+\n+ def __init__(self):\n+ self.is_active = False\n+ self.begin()\n+\n+ def begin(self):\n+ \"\"\"\n+ Starts a modern mode session by calling ``begin`` and ``figure``.\n+\n+ Sets the attribute ``_is_active`` to ``True`` to indicate that there\n+ is an active session.\n+ \"\"\"\n+ assert not self.is_active, \\\n+ \"Session is already active. Can't start two simultaneous sessions\"\n+ begin()\n+ figure()\n+ self.is_active = True\n+\n+ def end(self):\n+ \"\"\"\n+ End the current session.\n+ \"\"\"\n+ assert self.is_active, \"Can't end an inactive session.\"\n+ end()\n+ self.is_active = False\n+\n+ def restart(self):\n+ \"\"\"\n+ End the current session (if it's active) and start a new one.\n+ \"\"\"\n+ if self.is_active:\n+ self.end()\n+ self.begin()\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/tests/test_clib.py",
"new_path": "gmt/tests/test_clib.py",
"diff": "@@ -6,6 +6,9 @@ import os\n from ..clib import create_session, destroy_session, call_module, load_libgmt\n \n \n+TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')\n+\n+\n def test_load_libgmt():\n \"Test that loading libgmt works and doesn't crash.\"\n libgmt = load_libgmt()\n@@ -25,9 +28,11 @@ def test_clib_session_management():\n \n def test_call_module():\n \"Run a psbasemap call to see if the module works\"\n- module = 'psbasemap'\n- args = '-R10/70/-3/8 -JX4i/3i -Ba -P ->tmp.ps'\n- call_module(module, args)\n- assert os.path.exists('tmp.ps')\n- os.remove('tmp.ps')\n- # Not the most ideal test. Just check if no segfaults or exceptions occur.\n+ data_fname = os.path.join(TEST_DATA_DIR, 'points.txt')\n+ out_fname = 'test_call_module.txt'\n+ call_module('gmtinfo', '{} -C ->{}'.format(data_fname, out_fname))\n+ assert os.path.exists(out_fname)\n+ with open(out_fname) as out_file:\n+ output = out_file.read().strip().replace('\\t', ' ')\n+ assert output == '11.5309 61.7074 -2.9289 7.8648 0.1412 0.9338'\n+ os.remove(out_fname)\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/tests/test_session_management.py",
"new_path": "gmt/tests/test_session_management.py",
"diff": "@@ -3,28 +3,85 @@ Test the session management modules.\n \"\"\"\n import os\n \n-from .. import begin, end, figure\n+from pytest import raises\n+\n+from .. import figure, _GLOBAL_SESSION\n+from ..session_management import begin, end, GMTSession\n from ..clib import call_module\n \n \n-def test_session():\n+def test_begin_end():\n \"\"\"\"\n Run a command inside a begin-end modern mode block.\n+ First, end the global session. When finished, restart it.\n \"\"\"\n+ _GLOBAL_SESSION.end()\n begin()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n+ _GLOBAL_SESSION.restart()\n assert os.path.exists('gmt-python-session.pdf')\n os.remove('gmt-python-session.pdf')\n \n \n def test_session_figure():\n \"\"\"\n- Run a figure command inside a begin-end modern mode block.\n- No file should be generated.\n+ Run a figure command and check that no file is be generated by gmt.end\n+\n+ Need to end the global session before doing this.\n \"\"\"\n+ _GLOBAL_SESSION.end()\n begin()\n figure()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n+ _GLOBAL_SESSION.restart()\n assert not os.path.exists('gmt-python-figure.pdf')\n+\n+\n+def test_gmtsession_begin_error():\n+ \"\"\"\n+ Check that an error is raised when trying to start session without ending\n+ it first.\n+ \"\"\"\n+ _GLOBAL_SESSION.end()\n+ session = GMTSession()\n+ with raises(AssertionError):\n+ session.begin()\n+ session.end()\n+ _GLOBAL_SESSION.restart()\n+\n+\n+def test_gmtsession_restart():\n+ \"\"\"\n+ Check that a session can be restarted without crashes.\n+ Restart should kill current session and begin a new one.\n+ There should be no way to begin a session without ending it first.\n+ \"\"\"\n+ _GLOBAL_SESSION.end()\n+ session = GMTSession()\n+ assert session.is_active\n+ # Should work when session is active\n+ session.restart()\n+ assert session.is_active\n+ # And also when it isn't\n+ session.end()\n+ assert not session.is_active\n+ session.restart()\n+ assert session.is_active\n+ session.end()\n+ _GLOBAL_SESSION.restart()\n+\n+\n+def test_gmtsession_error_end():\n+ \"\"\"\n+ Should raise an error when calling end twice in a row.\n+ \"\"\"\n+ _GLOBAL_SESSION.end()\n+ session = GMTSession()\n+ assert session.is_active\n+ session.end()\n+ assert not session.is_active\n+ with raises(AssertionError):\n+ session.end()\n+ _GLOBAL_SESSION.restart()\n"
}
] |
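The GMTSession class above is essentially a guard object: an is_active flag plus begin/end/restart methods that refuse to be called in the wrong order. A reduced sketch of that state machine with prints instead of the real GMT calls:

    class Session:
        """Toy session guard: starts on creation, refuses double begin/end."""

        def __init__(self):
            self.is_active = False
            self.begin()

        def begin(self):
            assert not self.is_active, "Session is already active."
            print("begin + figure")      # the real class calls gmt begin/figure here
            self.is_active = True

        def end(self):
            assert self.is_active, "Can't end an inactive session."
            print("end")                 # the real class calls gmt end here
            self.is_active = False

        def restart(self):
            if self.is_active:
                self.end()
            self.begin()

    session = Session()
    session.restart()
    session.end()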
710501ab0ac79a8fb5d6798936de652d1a4e3d3d | genericmappingtools/pygmt | 07.07.2017 00:17:57 | BSD 3-Clause New or Revised License | Replace the global GMTSession with 'atexit'
No need for the class or the global instance. Register 'end' with
'atexit' to make sure end is called when exiting Python. | [
{
"change_type": "MODIFY",
"old_path": "gmt/__init__.py",
"new_path": "gmt/__init__.py",
"diff": "@@ -1,23 +1,22 @@\n \"\"\"\n GMT Python interface\n \"\"\"\n-from ._version import get_versions\n+import atexit as _atexit\n+\n+from ._version import get_versions as _get_versions\n \n # Import modules to make the high-level GMT Python API\n from .ps_modules import pscoast, psconvert, psbasemap\n-from .session_management import figure, GMTSession\n+from .session_management import figure, begin as _begin, end as _end\n \n \n # Get the version number through versioneer\n-__version__ = get_versions()['version']\n-# Delete the function so that it doesn't appear in the public API\n-del get_versions\n+__version__ = _get_versions()['version']\n \n-# Start our global modern mode session. It calls \"gmt.begin\" when started and\n-# \"gmt.end\" when deleted.\n-_GLOBAL_SESSION = GMTSession()\n-# Delete the class so that it doesn't appear in the public API\n-del GMTSession\n+# Start our global modern mode session\n+_begin()\n+# Tell Python to run _end when shutting down\n+_atexit.register(_end)\n \n \n def test(doctest=True, verbose=True, coverage=False, figures=True):\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/session_management.py",
"new_path": "gmt/session_management.py",
"diff": "@@ -1,5 +1,5 @@\n \"\"\"\n-Session management modules: begin, end, figure, clean\n+Session management modules: begin, end, figure, etc\n \"\"\"\n from . import clib\n \n@@ -46,58 +46,3 @@ def figure():\n # Passing format '-' tells gmt.end to not produce any files.\n fmt = '-'\n clib.call_module('figure', '{} {}'.format(prefix, fmt))\n-\n-\n-class GMTSession():\n- \"\"\"\n- Placeholder for an active modern mode session.\n-\n- Calls ``begin`` and ``figure`` when created. Calls ``end`` when destroyed\n- so that the temporary files are cleaned.\n-\n- The call to ``figure`` is necessary because the default behavior in Python\n- is to not generate figure files unless explicitly commanded by\n- ``psconvert`` or ``savefig``. The call starts a new figure with the format\n- ``-`` which indicates that ``end`` should skip processing that figure.\n-\n- \"\"\"\n-\n- def __init__(self):\n- self.is_active = False\n- self.begin()\n-\n- def begin(self):\n- \"\"\"\n- Starts a modern mode session by calling ``begin`` and ``figure``.\n-\n- Sets the attribute ``_is_active`` to ``True`` to indicate that there\n- is an active session.\n- \"\"\"\n- assert not self.is_active, \\\n- \"Session is already active. Can't start two simultaneous sessions\"\n- begin()\n- figure()\n- self.is_active = True\n-\n- def end(self):\n- \"\"\"\n- End the current session.\n- \"\"\"\n- assert self.is_active, \"Can't end an inactive session.\"\n- end()\n- self.is_active = False\n-\n- def restart(self):\n- \"\"\"\n- End the current session (if it's active) and start a new one.\n- \"\"\"\n- if self.is_active:\n- self.end()\n- self.begin()\n-\n- def __del__(self):\n- \"\"\"\n- When the session is being garbage collected, call ``end`` to clean up\n- the session.\n- \"\"\"\n- self.end()\n"
},
{
"change_type": "MODIFY",
"old_path": "gmt/tests/test_session_management.py",
"new_path": "gmt/tests/test_session_management.py",
"diff": "@@ -3,10 +3,8 @@ Test the session management modules.\n \"\"\"\n import os\n \n-from pytest import raises\n-\n-from .. import figure, _GLOBAL_SESSION\n-from ..session_management import begin, end, GMTSession\n+from .. import figure\n+from ..session_management import begin, end\n from ..clib import call_module\n \n \n@@ -15,11 +13,11 @@ def test_begin_end():\n Run a command inside a begin-end modern mode block.\n First, end the global session. When finished, restart it.\n \"\"\"\n- _GLOBAL_SESSION.end()\n+ end() # Kill the global session\n begin()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n- _GLOBAL_SESSION.restart()\n+ begin() # Restart the global session\n assert os.path.exists('gmt-python-session.pdf')\n os.remove('gmt-python-session.pdf')\n \n@@ -30,58 +28,10 @@ def test_session_figure():\n \n Need to end the global session before doing this.\n \"\"\"\n- _GLOBAL_SESSION.end()\n+ end() # Kill the global session\n begin()\n figure()\n call_module('psbasemap', '-R10/70/-3/8 -JX4i/3i -Ba -P')\n end()\n- _GLOBAL_SESSION.restart()\n+ begin() # Restart the global session\n assert not os.path.exists('gmt-python-figure.pdf')\n-\n-\n-def test_gmtsession_begin_error():\n- \"\"\"\n- Check that an error is raised when trying to start session without ending\n- it first.\n- \"\"\"\n- _GLOBAL_SESSION.end()\n- session = GMTSession()\n- with raises(AssertionError):\n- session.begin()\n- session.end()\n- _GLOBAL_SESSION.restart()\n-\n-\n-def test_gmtsession_restart():\n- \"\"\"\n- Check that a session can be restarted without crashes.\n- Restart should kill current session and begin a new one.\n- There should be no way to begin a session without ending it first.\n- \"\"\"\n- _GLOBAL_SESSION.end()\n- session = GMTSession()\n- assert session.is_active\n- # Should work when session is active\n- session.restart()\n- assert session.is_active\n- # And also when it isn't\n- session.end()\n- assert not session.is_active\n- session.restart()\n- assert session.is_active\n- session.end()\n- _GLOBAL_SESSION.restart()\n-\n-\n-def test_gmtsession_error_end():\n- \"\"\"\n- Should raise an error when calling end twice in a row.\n- \"\"\"\n- _GLOBAL_SESSION.end()\n- session = GMTSession()\n- assert session.is_active\n- session.end()\n- assert not session.is_active\n- with raises(AssertionError):\n- session.end()\n- _GLOBAL_SESSION.restart()\n"
}
] |
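The last pygmt commit drops the guard class entirely and leans on the standard library: atexit.register guarantees the cleanup runs when the interpreter shuts down. A self-contained sketch of that pattern; here _begin/_end just print, whereas the real package calls the GMT begin/end modules:

    import atexit

    def _begin():
        print("modern mode session started")

    def _end():
        print("modern mode session ended")   # runs automatically at interpreter exit

    _begin()
    atexit.register(_end)

    print("doing work...")
    # No explicit cleanup needed: Python calls _end() when this script exits.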
9a581830e4fa02eed501b4e1f546a2e2ea358e13 | bitcoinunlimited/bitcoinunlimited | 11.07.2017 09:49:39 | MIT License | Add 4 block attack scenarios to PV python testing
Also add the needed option -pvtest which slows down the checking
of inputs by putting a sleep time of 1 second for each input to
check. This allows us to simulate the very slow-to-validate blocks
used in the attack scenarios. | [
{
"change_type": "MODIFY",
"old_path": "qa/pull-tester/rpc-tests.py",
"new_path": "qa/pull-tester/rpc-tests.py",
"diff": "@@ -209,6 +209,7 @@ testScripts = [ RpcTest(t) for t in [\n testScriptsExt = [ RpcTest(t) for t in [\n 'txPerf',\n 'excessive --extensive',\n+ 'parallel --extensive',\n 'bip9-softforks',\n 'bip65-cltv',\n 'bip65-cltv-p2p',\n"
},
{
"change_type": "MODIFY",
"old_path": "qa/rpc-tests/parallel.py",
"new_path": "qa/rpc-tests/parallel.py",
"diff": "@@ -11,6 +11,7 @@ from test_framework.util import *\n class ParallelTest (BitcoinTestFramework):\n def __init__(self):\n self.rep = False\n+ #self.extensive = False\n BitcoinTestFramework.__init__(self)\n \n def setup_chain(self):\n@@ -67,7 +68,7 @@ class ParallelTest (BitcoinTestFramework):\n if self.rep:\n self.repetitiveTest()\n return\n- \n+\n print (\"Mining blocks with PV off...\")\n \n # Mine some blocks on node2 which we will need at the end to generate a few transactions from that node\n@@ -345,7 +346,7 @@ class ParallelTest (BitcoinTestFramework):\n for i in range(num_range):\n self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)\n \n- # Mine 5 competing blocks. This should not cause a crash or failure to sync nodes.\n+ # Mine 5 competing blocks.\n print (\"Mine 5 competing blocks...\")\n self.nodes[0].generate(1)\n self.nodes[2].generate(1)\n@@ -379,17 +380,403 @@ class ParallelTest (BitcoinTestFramework):\n self.nodes[5].generate(1)\n sync_blocks(self.nodes)\n \n+ # Mine another block which will cause the nodes to sync to one chain\n+ print (\"Mine another block...\")\n+ self.nodes[0].generate(1)\n+ sync_blocks(self.nodes)\n+\n+ #stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ \n+\n+ ################################################\n+ # Begin extended tests\n+ ################################################\n+ if self.longTest == False:\n+ return\n+ \n+ #def run_attack_block_scenario (self):\n+\n+ ###########################################################################################\n+ # Test the 4 block attack scenarios - use -pvtest=true to slow down the checking of inputs.\n+ ###########################################################################################\n+\n+ ####################################################################\n+ # Mine 4 blocks of all different sizes\n+ # - the smallest block should win\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ print (\"Send more transactions...\")\n+ num_range = 15\n+ for i in range(num_range):\n+ self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)\n+ num_range = 14\n+ for i in range(num_range):\n+ self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)\n+ num_range = 13\n+ for i in range(num_range):\n+ self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)\n+ num_range = 2\n+ for i in range(num_range):\n+ self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)\n+\n+ # Mine 4 competing blocks.\n+ print (\"Mine 4 competing blocks...\")\n+ self.nodes[0].generate(1)\n+ self.nodes[2].generate(1)\n+ self.nodes[3].generate(1)\n+ self.nodes[4].generate(1)\n+\n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ # start nodes with -pvtest set to true.\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, 
[\"-debug\",\"-pvtest=1\"]))\n+\n+ # Connect nodes so that all blocks are sent at same time to node1.\n+ connect_nodes(self.nodes[1],0)\n+ connect_nodes(self.nodes[1],2)\n+ connect_nodes(self.nodes[1],3)\n+ connect_nodes(self.nodes[1],4)\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())\n+ \n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ connect_nodes(self.nodes[1],0)\n+ connect_nodes(self.nodes[1],2)\n+ connect_nodes(self.nodes[1],3)\n+ connect_nodes(self.nodes[1],4)\n+ connect_nodes(self.nodes[1],5)\n+ sync_blocks(self.nodes)\n+\n+ # Mine a block which will cause all nodes to update their chains\n+ print (\"Mine another block...\")\n+ self.nodes[1].generate(1)\n+ time.sleep(2) #wait for blocks to propagate\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())\n+ \n+\n+ print (\"Mine more blocks on each node...\")\n+ self.nodes[0].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[1].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[2].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[3].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[4].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[5].generate(25)\n+ sync_blocks(self.nodes)\n+\n+ #stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ ########################################################################################################\n+ # Mine 4 blocks all the same size and get them to start validating and then send a 5th that is smaller\n+ # - the last smallest and last block arriving should win.\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ print (\"Send more transactions...\")\n+ num_range = 15\n+ for i in range(num_range):\n+ self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)\n+ num_range = 15\n+ for i in range(num_range):\n+ self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)\n+ num_range = 15\n+ for i in range(num_range):\n+ self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)\n+ num_range = 15\n+ for i in range(num_range):\n+ self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)\n+ num_range = 2\n+ for i in range(num_range):\n+ self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)\n+\n+ # 
stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ # start nodes with -pvtest set to true.\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+\n+ # Connect nodes so that first 4 blocks are sent at same time to node1.\n+ connect_nodes(self.nodes[1],0)\n+ connect_nodes(self.nodes[1],2)\n+ connect_nodes(self.nodes[1],3)\n+ connect_nodes(self.nodes[1],4)\n+ time.sleep(5) #wait for blocks to start processing\n+ \n+ # Connect 5th block and this one should win the race\n+ connect_nodes(self.nodes[1],5)\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())\n+ \n+ #stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ connect_nodes(self.nodes[1],0)\n+ connect_nodes(self.nodes[1],2)\n+ connect_nodes(self.nodes[1],3)\n+ connect_nodes(self.nodes[1],4)\n+ connect_nodes(self.nodes[1],5)\n+\n+ # Mine a block which will cause all nodes to update their chains\n+ print (\"Mine another block...\")\n+ self.nodes[1].generate(1)\n+ time.sleep(2) #wait for blocks to propagate\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())\n+ \n+ print (\"Mine more blocks on each node...\")\n+ self.nodes[0].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[1].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[2].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[3].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[4].generate(25)\n+ sync_blocks(self.nodes)\n+ self.nodes[5].generate(25)\n+ sync_blocks(self.nodes)\n+\n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ ############################################################################################################\n+ # Mine 4 blocks all the same size and get them to start validating and then send a 5th that is the same size\n+ # - the first block arriving should win\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ 
self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ print (\"Send more transactions...\")\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)\n+\n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ # start nodes with -pvtest set to true.\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+\n+ # Connect nodes so that first 4 blocks are sent 1 second apart to node1.\n+ connect_nodes(self.nodes[1],0)\n+ time.sleep(1)\n+ connect_nodes(self.nodes[1],2)\n+ time.sleep(1)\n+ connect_nodes(self.nodes[1],3)\n+ time.sleep(1)\n+ connect_nodes(self.nodes[1],4)\n+ time.sleep(1) #wait for blocks to start processing\n+ \n+ # Connect 5th block and this one be terminated and the first block to connect from node0 should win the race\n+ connect_nodes(self.nodes[1],5)\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())\n+ \n #stop nodes\n stop_nodes(self.nodes)\n wait_bitcoinds()\n \n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ connect_nodes(self.nodes[1],0)\n+ connect_nodes(self.nodes[1],2)\n+ connect_nodes(self.nodes[1],3)\n+ connect_nodes(self.nodes[1],4)\n+ connect_nodes(self.nodes[1],5)\n+\n+ # Mine a block which will cause all nodes to update their chains\n+ print (\"Mine another block...\")\n+ self.nodes[1].generate(1)\n+ time.sleep(2) #wait for blocks to propagate\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())\n+ \n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ #########################################################################################################\n+ # 
Mine 4 blocks all the same size and get them to start validating and then send a 5th that is bigger\n+ # - the first block arriving should win\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ print (\"Send more transactions...\")\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[3].sendtoaddress(self.nodes[3].getnewaddress(), 0.01)\n+ num_range = 10\n+ for i in range(num_range):\n+ self.nodes[4].sendtoaddress(self.nodes[4].getnewaddress(), 0.01)\n+ num_range = 20\n+ for i in range(num_range):\n+ self.nodes[5].sendtoaddress(self.nodes[5].getnewaddress(), 0.01)\n+\n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ # start nodes with -pvtest set to true.\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=1\"]))\n+\n+ # Connect nodes so that first 4 blocks are sent 1 second apart to node1.\n+ connect_nodes(self.nodes[1],0)\n+ time.sleep(1)\n+ connect_nodes(self.nodes[1],2)\n+ time.sleep(1)\n+ connect_nodes(self.nodes[1],3)\n+ time.sleep(1)\n+ connect_nodes(self.nodes[1],4)\n+ time.sleep(1) #wait for blocks to start processing\n+ \n+ # Connect 5th block and this one be terminated and the first block to connect from node0 should win the race\n+ connect_nodes(self.nodes[1],5)\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())\n+ \n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(1, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(2, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(3, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(4, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+ self.nodes.append(start_node(5, self.options.tmpdir, [\"-debug\",\"-pvtest=0\"]))\n+\n+ connect_nodes(self.nodes[1],0)\n+ connect_nodes(self.nodes[1],2)\n+ connect_nodes(self.nodes[1],3)\n+ connect_nodes(self.nodes[1],4)\n+ connect_nodes(self.nodes[1],5)\n+\n+ # Mine a block which will cause all nodes to update their chains\n+ print (\"Mine another block...\")\n+ self.nodes[1].generate(1)\n+ time.sleep(2) #wait for blocks to propagate\n+ sync_blocks(self.nodes)\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[0].getbestblockhash())\n+ 
assert_equal(self.nodes[1].getbestblockhash(), self.nodes[2].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[3].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[4].getbestblockhash())\n+ assert_equal(self.nodes[1].getbestblockhash(), self.nodes[5].getbestblockhash())\n+ \n+ # stop nodes\n+ stop_nodes(self.nodes)\n+ wait_bitcoinds()\n+\n+\n \n def Test():\n t = ParallelTest()\n t.rep = True\n t.main([\"--tmpdir=/ramdisk/test\", \"--nocleanup\",\"--noshutdown\"])\n \n-if __name__ == '__main__':\n+if __name__ == '__main__': \n+\n p = ParallelTest() \n if \"--rep\" in sys.argv:\n print(\"Repetitive test\")\n@@ -397,6 +784,19 @@ if __name__ == '__main__':\n sys.argv.remove(\"--rep\")\n else:\n p.rep = False\n- \n+\n+ if \"--extensive\" in sys.argv:\n+ p.longTest = True\n+ # we must remove duplicate 'extensive' arg here\n+ while True:\n+ try:\n+ sys.argv.remove('--extensive')\n+ except:\n+ break\n+ print (\"Running extensive tests\")\n+ else:\n+ p.longTest = False\n+\n+\n p.main ()\n \n"
},
{
"change_type": "MODIFY",
"old_path": "src/allowed_args.cpp",
"new_path": "src/allowed_args.cpp",
"diff": "@@ -473,6 +473,7 @@ static void addDebuggingOptions(AllowedArgs &allowedArgs, HelpMessageMode mode)\n .addDebugArg(\"testsafemode\", optionalBool, strprintf(\"Force safe mode (default: %u)\", DEFAULT_TESTSAFEMODE))\n .addDebugArg(\"dropmessagestest=<n>\", requiredInt, \"Randomly drop 1 of every <n> network messages\")\n .addDebugArg(\"fuzzmessagestest=<n>\", requiredInt, \"Randomly fuzz 1 of every <n> network messages\")\n+ .addDebugArg(\"pvtest\", optionalBool, strprintf(\"Slow down input checking to 1 every second (default: %u)\", DEFAULT_PV_TESTMODE))\n #ifdef ENABLE_WALLET\n .addDebugArg(\"flushwallet\", optionalBool,\n strprintf(\"Run a thread to flush wallet periodically (default: %u)\", DEFAULT_FLUSHWALLET))\n"
},
{
"change_type": "MODIFY",
"old_path": "src/init.h",
"new_path": "src/init.h",
"diff": "@@ -35,6 +35,7 @@ static const bool DEFAULT_PROXYRANDOMIZE = true;\n static const bool DEFAULT_REST_ENABLE = false;\n static const bool DEFAULT_DISABLE_SAFEMODE = false;\n static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;\n+static const bool DEFAULT_PV_TESTMODE = false;\n \n /** Returns licensing information (for -version) */\n std::string LicenseInfo();\n"
},
{
"change_type": "MODIFY",
"old_path": "src/main.cpp",
"new_path": "src/main.cpp",
"diff": "@@ -2632,6 +2632,11 @@ bool ConnectBlock(const CBlock &block,\n {\n return false;\n }\n+\n+ // This is for testing PV and slowing down the validation of inputs. This makes it easier to create\n+ // and run python regression tests and is an testing feature.\n+ if (GetArg(\"-pvtest\", false))\n+ MilliSleep(1000);\n }\n LogPrint(\"thin\", \"Number of CheckInputs() performed: %d Orphan count: %d\\n\", nChecked, nOrphansChecked);\n \n"
}
] |
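The parallel.py diff above adds an opt-in extensive mode driven by an `--extensive` flag that is stripped from sys.argv (including duplicates) before the test framework parses the remaining arguments. Below is a minimal standalone sketch of that argument-stripping pattern; the `LongTestRunner` class and its attribute names are invented for illustration and are not part of the recorded commit.

    import sys

    class LongTestRunner:
        """Toy stand-in for a test class with an opt-in extensive mode."""
        def __init__(self):
            self.long_test = False

        def main(self, argv):
            print("running extensive tests" if self.long_test else "running quick tests")

    if __name__ == "__main__":
        runner = LongTestRunner()
        if "--extensive" in sys.argv:
            runner.long_test = True
            # Remove every duplicate occurrence so the framework never sees the flag.
            while "--extensive" in sys.argv:
                sys.argv.remove("--extensive")
        runner.main(sys.argv[1:])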
01975338cdb2444471df8371766f8a64815c1ad6 | bitcoinunlimited/bitcoinunlimited | 04.04.2019 14:37:51 | MIT License | Improve reliability of p2p-acceptblock.py
Numerous timing issues to do with the multi-threading and also
the request manager. Better not to rely on sync_with_ping(), and also
we have to handle getheaders requests and subsequent multi-hash
getdata requests. | [
{
"change_type": "MODIFY",
"old_path": "qa/rpc-tests/p2p-acceptblock.py",
"new_path": "qa/rpc-tests/p2p-acceptblock.py",
"diff": "@@ -64,13 +64,31 @@ class TestNode(NodeConnCB):\n self.connection = None\n self.ping_counter = 1\n self.last_pong = msg_pong()\n+ self.last_getdata = []\n+\n+ def sync_getdata(self, hash_list, timeout=60):\n+ while timeout > 0:\n+ with mininode_lock:\n+ #Check whether any getdata responses are in the hash list and\n+ #if so remove them from both lists.\n+ for x in self.last_getdata:\n+ for y in hash_list:\n+ if (str(x.inv).find(hex(y)[2:]) > 0):\n+ self.last_getdata.remove(x)\n+ hash_list.remove(y)\n+ if hash_list == []:\n+ return\n+\n+ time.sleep(0.1)\n+ timeout -= 0.1\n+ raise AssertionError(\"Sync getdata failed to complete\")\n \n def add_connection(self, conn):\n self.connection = conn\n \n # Track the last getdata message we receive (used in the test)\n def on_getdata(self, conn, message):\n- self.last_getdata = message\n+ self.last_getdata.append(message)\n \n # Spin until verack message is received from the node.\n # We use this to signal that our test can begin. This\n@@ -119,10 +137,10 @@ class AcceptBlockTest(BitcoinTestFramework):\n # from peers which are not whitelisted, while Node1 will be used for\n # the whitelisted case.\n self.nodes = []\n- self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug\"],\n+ self.nodes.append(start_node(0, self.options.tmpdir, [\"-debug=net\", \"-debug=req\"],\n binary=self.options.testbinary))\n self.nodes.append(start_node(1, self.options.tmpdir,\n- [\"-debug\", \"-whitelist=127.0.0.1\"],\n+ [\"-debug=net\", \"-debug=req\", \"-whitelist=127.0.0.1\"],\n binary=self.options.testbinary))\n \n def run_test(self):\n@@ -157,7 +175,8 @@ class AcceptBlockTest(BitcoinTestFramework):\n test_node.send_message(msg_block(blocks_h2[0]))\n white_node.send_message(msg_block(blocks_h2[1]))\n \n- [ x.sync_with_ping() for x in [test_node, white_node] ]\n+ waitFor(10, lambda: self.nodes[0].getblockcount() == 2)\n+ waitFor(10, lambda: self.nodes[1].getblockcount() == 2)\n assert_equal(self.nodes[0].getblockcount(), 2)\n assert_equal(self.nodes[1].getblockcount(), 2)\n print(\"First height 2 block accepted by both nodes\")\n@@ -225,6 +244,7 @@ class AcceptBlockTest(BitcoinTestFramework):\n tips[j] = next_block\n test_node.sync_with_ping()\n time.sleep(2)\n+\n for x in all_blocks:\n try:\n self.nodes[0].getblock(x.hash)\n@@ -240,7 +260,7 @@ class AcceptBlockTest(BitcoinTestFramework):\n white_node.send_message(headers_message) # Send headers leading to tip\n white_node.send_message(msg_block(tips[1])) # Now deliver the tip\n try:\n- white_node.sync_with_ping()\n+ time.sleep(2) # give time for the tip to be delivered\n self.nodes[1].getblock(tips[1].hash)\n print(\"Unrequested block far ahead of tip accepted from whitelisted peer\")\n except:\n@@ -256,7 +276,7 @@ class AcceptBlockTest(BitcoinTestFramework):\n # the node processes it and incorrectly advances the tip).\n # But this would be caught later on, when we verify that an inv triggers\n # a getdata request for this block.\n- test_node.sync_with_ping()\n+ waitFor(10, lambda: self.nodes[0].getblockcount() == 2)\n assert_equal(self.nodes[0].getblockcount(), 2)\n print(\"Unrequested block that would complete more-work chain was ignored\")\n \n@@ -265,27 +285,18 @@ class AcceptBlockTest(BitcoinTestFramework):\n # triggers a getdata on block 2 (it should if block 2 is missing).\n with mininode_lock:\n # Clear state so we can check the getdata request\n- test_node.last_getdata = None\n+ test_node.last_getdata = []\n test_node.send_message(msg_inv([CInv(2, blocks_h3[0].sha256)]))\n \n- 
test_node.sync_with_ping()\n- with mininode_lock:\n- getdata = test_node.last_getdata\n-\n # Check that the getdata includes the right block\n- assert_equal(getdata.inv[0].hash, blocks_h2f[0].sha256)\n+ test_node.sync_getdata([blocks_h2f[0].sha256], timeout=10)\n print(\"Inv at tip triggered getdata for unprocessed block\")\n \n # 7. Send the missing block for the third time (now it is requested)\n test_node.send_message(msg_block(blocks_h2f[0]))\n- test_node.sync_with_ping()\n \n # Wait for the reorg to complete. It can be slower on some systems.\n- while self.nodes[0].getblockcount() != 290:\n- time.sleep(1)\n- j = j + 1\n- if (j > 60):\n- break\n+ waitFor(30, lambda: self.nodes[0].getblockcount() == 290)\n \n assert_equal(self.nodes[0].getblockcount(), 290)\n print(\"Successfully reorged to longer chain from non-whitelisted peer\")\n"
}
] |
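The p2p-acceptblock.py changes above replace sync_with_ping() with explicit polling helpers such as waitFor(...) and sync_getdata(...). A minimal sketch of that poll-until-condition pattern follows; the `wait_for` helper and the `FakeNode` class are illustrative stand-ins, not code from the commit.

    import time

    def wait_for(timeout, predicate, poll_interval=0.1):
        """Poll `predicate` until it returns True or `timeout` seconds elapse."""
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            if predicate():
                return
            time.sleep(poll_interval)
        raise AssertionError("condition not met within %s seconds" % timeout)

    # Usage: wait until a fake node reports the expected block count.
    class FakeNode:
        def __init__(self):
            self.blocks = 0

        def getblockcount(self):
            self.blocks += 1  # pretend one block arrives per poll
            return self.blocks

    node = FakeNode()
    wait_for(10, lambda: node.getblockcount() == 2)
    print("node reached height 2")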
05859b201594e87ff4d4472dd3914ef041314558 | graphite-project/graphite-web | 01.11.2018 15:42:56 | Apache License 2.0 | Add a minValue option to nonNegativeDerivative and perSecond
It works in a way similar to maxValue: when the counter wraps, instead of
producing a null value, it computes the difference assuming the counter wrapped
to minValue. | [
{
"change_type": "MODIFY",
"old_path": "webapp/graphite/render/functions.py",
"new_path": "webapp/graphite/render/functions.py",
"diff": "@@ -1973,12 +1973,15 @@ derivative.params = [\n ]\n \n \n-def perSecond(requestContext, seriesList, maxValue=None):\n+def perSecond(requestContext, seriesList, maxValue=None, minValue=None):\n \"\"\"\n NonNegativeDerivative adjusted for the series time interval\n This is useful for taking a running total metric and showing how many requests\n per second were handled.\n \n+ The optional ``minValue`` and ``maxValue`` parameters have the same\n+ meaning as in ``nonNegativeDerivative``.\n+\n Example:\n \n .. code-block:: none\n@@ -1997,7 +2000,7 @@ def perSecond(requestContext, seriesList, maxValue=None):\n step = series.step\n \n for val in series:\n- delta, prev = _nonNegativeDelta(val, prev, maxValue)\n+ delta, prev = _nonNegativeDelta(val, prev, maxValue, minValue)\n \n if delta is not None:\n # Division long by float cause OverflowError\n@@ -2020,6 +2023,7 @@ perSecond.group = 'Transform'\n perSecond.params = [\n Param('seriesList', ParamTypes.seriesList, required=True),\n Param('maxValue', ParamTypes.float),\n+ Param('minValue', ParamTypes.float),\n ]\n \n \n@@ -2147,13 +2151,18 @@ integralByInterval.params = [\n ]\n \n \n-def nonNegativeDerivative(requestContext, seriesList, maxValue=None):\n+def nonNegativeDerivative(requestContext, seriesList, maxValue=None, minValue=None):\n \"\"\"\n Same as the derivative function above, but ignores datapoints that trend\n down. Useful for counters that increase for a long time, then wrap or\n reset. (Such as if a network interface is destroyed and recreated by unloading\n and re-loading a kernel module, common with USB / WiFi cards.\n \n+ By default, a null value is returned in place of negative datapoints. When\n+ ``maxValue`` is supplied, the missing value is computed as if the counter\n+ had wrapped at ``maxValue``. When ``minValue`` is supplied, the missing\n+ value is computed as if the counter had wrapped to ``minValue``.\n+\n Example:\n \n .. 
code-block:: none\n@@ -2168,7 +2177,7 @@ def nonNegativeDerivative(requestContext, seriesList, maxValue=None):\n prev = None\n \n for val in series:\n- delta, prev = _nonNegativeDelta(val, prev, maxValue)\n+ delta, prev = _nonNegativeDelta(val, prev, maxValue, minValue)\n \n newValues.append(delta)\n \n@@ -2184,13 +2193,16 @@ nonNegativeDerivative.group = 'Transform'\n nonNegativeDerivative.params = [\n Param('seriesList', ParamTypes.seriesList, required=True),\n Param('maxValue', ParamTypes.float),\n+ Param('minValue', ParamTypes.float),\n ]\n \n \n-def _nonNegativeDelta(val, prev, maxValue):\n+def _nonNegativeDelta(val, prev, maxValue, minValue):\n # ignore values larger than maxValue\n if maxValue is not None and val > maxValue:\n return None, None\n+ if minValue is not None and val < minValue:\n+ return None, None\n \n # first reading\n if None in (prev, val):\n@@ -2200,12 +2212,16 @@ def _nonNegativeDelta(val, prev, maxValue):\n if val >= prev:\n return val - prev, val\n \n- # counter wrapped and we have maxValue\n- # calculate delta based on maxValue + 1 + val - prev\n+ # counter wrapped and we have maxValue (and optionally minValue)\n+ # calculate delta based on maxValue + 1 + val - prev - minValue\n if maxValue is not None:\n- return maxValue + 1 + val - prev, val\n+ return maxValue + 1 + val - prev - (minValue or 0), val\n+ # counter wrapped and we have maxValue\n+ # calculate delta based on val - minValue\n+ if minValue is not None:\n+ return val - minValue, val\n \n- # counter wrapped or reset and we don't have maxValue\n+ # counter wrapped or reset and we don't have minValue/maxValue\n # just use None\n return None, val\n \n"
},
{
"change_type": "MODIFY",
"old_path": "webapp/tests/test_functions.py",
"new_path": "webapp/tests/test_functions.py",
"diff": "@@ -1637,6 +1637,18 @@ class FunctionsTest(TestCase):\n result = functions.nonNegativeDerivative({}, seriesList,5)\n self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')\n \n+ def test_nonNegativeDerivative_min(self):\n+ seriesList = self._gen_series_list_with_data(key='test',start=0,end=600,step=60,data=[0, 1, 2, 3, 4, 5, 2, 3, 4, 5])\n+ expected = [TimeSeries('nonNegativeDerivative(test)', 0, 600, 60, [None, None, 1, 1, 1, 1, 1, 1, 1, 1])]\n+ result = functions.nonNegativeDerivative({}, seriesList,None,1)\n+ self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')\n+\n+ def test_nonNegativeDerivative_min_max(self):\n+ seriesList = self._gen_series_list_with_data(key='test',start=0,end=600,step=60,data=[0, 1, 2, 3, 4, 5, 2, 3, 4, 5])\n+ expected = [TimeSeries('nonNegativeDerivative(test)', 0, 600, 60, [None, None, 1, 1, 1, 1, 3, 1, 1, 1])]\n+ result = functions.nonNegativeDerivative({}, seriesList,6,1)\n+ self.assertEqual(expected, result, 'nonNegativeDerivative result incorrect')\n+\n def test_perSecond(self):\n seriesList = self._gen_series_list_with_data(key='test',start=0,end=600,step=60,data=[0, 120, 240, 480, 960, 1920, 3840, 7680, 15360, 60 ** 256 + 15360 ])\n expected = [TimeSeries('perSecond(test)', 0, 600, 60, [None, 2, 2, 4, 8, 16, 32, 64, 128, 60 ** 255])]\n"
}
] |
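The wrap arithmetic added to graphite's _nonNegativeDelta can be restated as a standalone function, which makes the new minValue behaviour easier to check against the test expectations in the diff. This is a sketch that mirrors the logic above, not graphite's actual module.

    def non_negative_delta(val, prev, max_value=None, min_value=None):
        """Delta between consecutive counter readings, handling wrap-around."""
        if max_value is not None and val > max_value:
            return None, None
        if min_value is not None and val < min_value:
            return None, None
        if prev is None or val is None:
            return None, val
        if val >= prev:
            return val - prev, val
        if max_value is not None:
            # Counter wrapped past max_value back to min_value (or 0).
            return max_value + 1 + val - prev - (min_value or 0), val
        if min_value is not None:
            # Counter reset to min_value.
            return val - min_value, val
        return None, val

    # Readings 5 -> 2 with max_value=6 and min_value=1 give 6 + 1 + 2 - 5 - 1 == 3,
    # matching the expected series in test_nonNegativeDerivative_min_max above.
    print(non_negative_delta(2, 5, max_value=6, min_value=1))  # (3, 2)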
c3f6817a03d60af66b4cabb47d7ecbc642bf6376 | sabeechen/hassio-google-drive-backup | 22.08.2022 08:08:06 | MIT License | Use alternate headers for HA auth requests
When sending an "Authorization: Bearer" header to the supervisor's /auth endpoint, the header gets used to authorize the addon and then _also_ to authorize against HA, which fails. This changes requests made to the /auth endpoint so they use the "X-Supervisor-Token" header to avoid the conflict. | [
{
"change_type": "MODIFY",
"old_path": "hassio-google-drive-backup/backup/ha/harequests.py",
"new_path": "hassio-google-drive-backup/backup/ha/harequests.py",
"diff": "@@ -18,6 +18,8 @@ from yarl import URL\n \r\n logger = getLogger(__name__)\r\n \r\n+HEADER_TOKEN = \"X-Supervisor-Token\"\r\n+\r\n NOTIFICATION_ID = \"backup_broken\"\r\n EVENT_BACKUP_START = \"backup_started\"\r\n EVENT_BACKUP_END = \"backup_ended\"\r\n@@ -81,7 +83,7 @@ class HaRequests():\n \r\n @supervisor_call\r\n async def auth(self, user: str, password: str) -> None:\r\n- await self._postHassioData(self.getSupervisorURL().with_path(\"auth\"), {\"username\": user, \"password\": password})\r\n+ await self._postHassioData(self.getSupervisorURL().with_path(\"auth\"), {\"username\": user, \"password\": password}, headers=self._altAuthHeaders())\r\n \r\n @supervisor_call\r\n async def upload(self, stream):\r\n@@ -178,7 +180,7 @@ class HaRequests():\n async def download(self, slug) -> AsyncHttpGetter:\r\n url = self.getSupervisorURL().with_path(\"{1}/{0}/download\".format(slug, self._getBackupPath()))\r\n ret = AsyncHttpGetter(url,\r\n- self._getHassioHeaders(),\r\n+ self._getAuthHeaders(),\r\n self.session,\r\n timeoutFactory=SupervisorTimeoutError.factory,\r\n otherErrorFactory=SupervisorUnexpectedError.factory,\r\n@@ -190,14 +192,14 @@ class HaRequests():\n @supervisor_call\r\n async def getSuperLogs(self):\r\n url = self.getSupervisorURL().with_path(\"supervisor/logs\")\r\n- async with self.session.get(url, headers=self._getHassioHeaders()) as resp:\r\n+ async with self.session.get(url, headers=self._getAuthHeaders()) as resp:\r\n resp.raise_for_status()\r\n return await resp.text()\r\n \r\n @supervisor_call\r\n async def getCoreLogs(self):\r\n url = self.getSupervisorURL().with_path(\"core/logs\")\r\n- async with self.session.get(url, headers=self._getHassioHeaders()) as resp:\r\n+ async with self.session.get(url, headers=self._getAuthHeaders()) as resp:\r\n resp.raise_for_status()\r\n return await resp.text()\r\n \r\n@@ -219,7 +221,7 @@ class HaRequests():\n \r\n async def getAddonLogo(self, slug: str):\r\n url = self.getSupervisorURL().with_path(\"addons/{0}/icon\".format(slug))\r\n- async with self.session.get(url, headers=self._getHassioHeaders()) as resp:\r\n+ async with self.session.get(url, headers=self._getAuthHeaders()) as resp:\r\n resp.raise_for_status()\r\n return (resp.headers['Content-Type'], await resp.read())\r\n \r\n@@ -232,30 +234,34 @@ class HaRequests():\n # Older versions of the supervisor use a different name for the token.\r\n return os.environ.get(\"HASSIO_TOKEN\")\r\n \r\n- def _getHassioHeaders(self):\r\n- return self._getHaHeaders()\r\n-\r\n- def _getHaHeaders(self):\r\n+ def _getAuthHeaders(self):\r\n return {\r\n 'Authorization': 'Bearer ' + self._getToken()\r\n }\r\n \r\n+ def _altAuthHeaders(self):\r\n+ return {\r\n+ HEADER_TOKEN: self._getToken()\r\n+ }\r\n+\r\n @supervisor_call\r\n async def _getHassioData(self, url: URL) -> Dict[str, Any]:\r\n logger.debug(\"Making Hassio request: \" + str(url))\r\n- return await self._validateHassioReply(await self.session.get(url, headers=self._getHassioHeaders()))\r\n+ return await self._validateHassioReply(await self.session.get(url, headers=self._getAuthHeaders()))\r\n \r\n- async def _postHassioData(self, url: URL, json=None, file=None, data=None, timeout=None) -> Dict[str, Any]:\r\n- return await self._sendHassioData(\"post\", url, json, file, data, timeout)\r\n+ async def _postHassioData(self, url: URL, json=None, file=None, data=None, timeout=None, headers=None) -> Dict[str, Any]:\r\n+ return await self._sendHassioData(\"post\", url, json, file, data, timeout, headers)\r\n \r\n @supervisor_call\r\n- 
async def _sendHassioData(self, method: str, url: URL, json=None, file=None, data=None, timeout=None) -> Dict[str, Any]:\r\n+ async def _sendHassioData(self, method: str, url: URL, json=None, file=None, data=None, timeout=None, headers=None) -> Dict[str, Any]:\r\n+ if headers is None:\r\n+ headers = self._getAuthHeaders()\r\n logger.debug(\"Making Hassio request: \" + str(url))\r\n- return await self._validateHassioReply(await self.session.request(method, url, headers=self._getHassioHeaders(), json=json, data=data, timeout=timeout))\r\n+ return await self._validateHassioReply(await self.session.request(method, url, headers=headers, json=json, data=data, timeout=timeout))\r\n \r\n async def _postHaData(self, path: str, data: Dict[str, Any]) -> None:\r\n url = self.getSupervisorURL().with_path(\"/core/api/\" + path)\r\n- async with self.session.post(url, headers=self._getHaHeaders(), json=data) as resp:\r\n+ async with self.session.post(url, headers=self._getAuthHeaders(), json=data) as resp:\r\n resp.raise_for_status()\r\n \r\n async def sendNotification(self, title: str, message: str) -> None:\r\n"
},
{
"change_type": "MODIFY",
"old_path": "hassio-google-drive-backup/dev/simulated_supervisor.py",
"new_path": "hassio-google-drive-backup/dev/simulated_supervisor.py",
"diff": "@@ -147,6 +147,8 @@ class SimulatedSupervisor(BaseServer):\n async def _verifyHeader(self, request) -> bool:\n if request.headers.get(\"Authorization\", None) == \"Bearer \" + self._auth_token:\n return\n+ if request.headers.get(\"X-Supervisor-Token\", None) == self._auth_token:\n+ return\n raise HTTPUnauthorized()\n \n async def _getSnapshots(self, request: Request):\n"
}
] |
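The change above boils down to building two different header dictionaries from the same supervisor token, so the bearer header is never reused against Home Assistant on /auth calls. A trivial sketch of the two shapes, with a made-up token value:

    # Hypothetical token; in the addon it comes from the environment.
    token = "example-supervisor-token"

    def auth_headers(token):
        """Default headers used for most supervisor requests."""
        return {"Authorization": "Bearer " + token}

    def alt_auth_headers(token):
        """Alternate headers for /auth, so the bearer header is not forwarded to HA."""
        return {"X-Supervisor-Token": token}

    print(auth_headers(token))
    print(alt_auth_headers(token))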
e3d28a803cdcd1f050dcbf8ea4ee4fa21954caba | miurahr/aqtinstall | 23.07.2021 13:11:43 | MIT License | Use a library instead of ad-hoc code
This replaces `pretty_print_combos` with `json.dumps`, and
`compare_combos` with `jsoncomparison.Compare`. | [
{
"change_type": "MODIFY",
"old_path": "ci/generate_combinations.py",
"new_path": "ci/generate_combinations.py",
"diff": "@@ -3,9 +3,10 @@\n import argparse\n import json\n import logging\n-import re\n from pathlib import Path\n-from typing import Dict, Generator, Iterator, List, Optional, Set, Tuple, Union\n+from typing import Dict, Generator, Iterator, List, Optional, Tuple, Union\n+\n+from jsoncomparison import NO_DIFF, Compare\n \n from aqt.exceptions import ArchiveConnectionError, ArchiveDownloadError\n from aqt.helper import Settings, setup_logging\n@@ -153,207 +154,46 @@ def generate_combos(new_archive: List[str]):\n }\n \n \n-def pretty_print_combos(combos: Dict[str, Union[List[Dict], List[str]]]) -> str:\n- \"\"\"\n- Attempts to mimic the formatting of the existing combinations.json.\n- \"\"\"\n-\n- def fmt_dict_entry(entry: Dict, depth: int) -> str:\n- return '{}{{\"os_name\": {:<10} \"target\": {:<10} {}\"arch\": \"{}\"}}'.format(\n- \" \" * depth,\n- f'\"{entry[\"os_name\"]}\",',\n- f'\"{entry[\"target\"]}\",',\n- (\n- f'\"tool_name\": \"{entry[\"tool_name\"]}\", '\n- if \"tool_name\" in entry.keys()\n- else \"\"\n- ),\n- entry[\"arch\"],\n- )\n-\n- def span_multiline(line: str, max_width: int, depth: int) -> str:\n- window = (0, max_width)\n- indent = \" \" * (depth + 1)\n- while len(line) - window[0] > max_width:\n- break_loc = line.rfind(\" \", window[0], window[1])\n- line = line[:break_loc] + \"\\n\" + indent + line[break_loc + 1 :]\n- window = (break_loc + len(indent), break_loc + len(indent) + max_width)\n- return line\n-\n- def fmt_module_entry(entry: Dict, depth: int = 0) -> str:\n- line = '{}{{\"qt_version\": \"{}\", \"modules\": [{}]}}'.format(\n- \" \" * depth,\n- entry[\"qt_version\"],\n- \", \".join([f'\"{s}\"' for s in entry[\"modules\"]]),\n- )\n- return span_multiline(line, 120, depth)\n-\n- def fmt_version_list(entry: List[str], depth: int) -> str:\n- assert isinstance(entry, list)\n- minor_pattern = re.compile(r\"^\\d+\\.(\\d+)(\\.\\d+)?\")\n-\n- def iter_minor_versions():\n- if len(entry) == 0:\n- return\n- begin_index = 0\n- current_minor_ver = int(minor_pattern.match(entry[begin_index]).group(1))\n- for i, ver in enumerate(entry):\n- minor = int(minor_pattern.match(ver).group(1))\n- if minor != current_minor_ver:\n- yield entry[begin_index:i]\n- begin_index = i\n- current_minor_ver = minor\n- yield entry[begin_index:]\n-\n- joiner = \",\\n\" + \" \" * depth\n- line = joiner.join(\n- [\n- \", \".join([f'\"{ver}\"' for ver in minor_group])\n- for minor_group in iter_minor_versions()\n- ]\n- )\n-\n- return line\n-\n- root_element_strings = [\n- f'\"{key}\": [\\n'\n- + \",\\n\".join([item_formatter(item, depth=1) for item in combos[key]])\n- + \"\\n]\"\n- for key, item_formatter in (\n- (\"qt\", fmt_dict_entry),\n- (\"tools\", fmt_dict_entry),\n- (\"modules\", fmt_module_entry),\n- )\n- ] + [\n- f'\"{key}\": [\\n ' + fmt_version_list(combos[key], depth=1) + \"\\n]\"\n- for key in (\"versions\", \"new_archive\")\n- ]\n-\n- return \"[{\" + \", \".join(root_element_strings) + \"}]\"\n-\n-\n-def compare_combos(\n- actual_combos: Dict[str, Union[List[str], List[Dict]]],\n- expected_combos: Dict[str, Union[List[str], List[Dict]]],\n- actual_name: str,\n- expect_name: str,\n-) -> bool:\n- # list_of_str_keys: the values attached to these keys are List[str]\n- list_of_str_keys = \"versions\", \"new_archive\"\n-\n- has_difference = False\n-\n- # Don't compare data pulled from previous file\n- skipped_keys = (\"new_archive\",)\n-\n- def compare_modules_entry(actual_mod_item: Dict, expect_mod_item: Dict) -> bool:\n- \"\"\"Return True if difference detected. 
Print description of difference.\"\"\"\n- version = actual_mod_item[\"qt_version\"]\n- actual_modules, expect_modules = set(actual_mod_item[\"modules\"]), set(\n- expect_mod_item[\"modules\"]\n- )\n- mods_missing_from_actual = expect_modules - actual_modules\n- mods_missing_from_expect = actual_modules - expect_modules\n- if mods_missing_from_actual:\n- logger.info(\n- f\"{actual_name}['modules'] for Qt {version} is missing {mods_missing_from_actual}\"\n- )\n- if mods_missing_from_expect:\n- logger.info(\n- f\"{expect_name}['modules'] for Qt {version} is missing {mods_missing_from_expect}\"\n- )\n- return bool(mods_missing_from_actual) or bool(mods_missing_from_expect)\n-\n- def to_set(a_list: Union[List[str], List[Dict]]) -> Set:\n- if len(a_list) == 0:\n- return set()\n- if isinstance(a_list[0], str):\n- return set(a_list)\n- assert isinstance(a_list[0], Dict)\n- return set([str(a_dict) for a_dict in a_list])\n-\n- def report_difference(\n- superset: Set, subset: Set, subset_name: str, key: str\n- ) -> bool:\n- \"\"\"Return True if difference detected. Print description of difference.\"\"\"\n- missing_from_superset = sorted(superset - subset)\n- if not missing_from_superset:\n- return False\n- logger.info(f\"{subset_name}['{key}'] is missing these entries:\")\n- if key in list_of_str_keys:\n- logger.info(format(missing_from_superset))\n- return True\n- for el in missing_from_superset:\n- logger.info(format(el))\n- return True\n-\n- for root_key in actual_combos.keys():\n- if root_key in skipped_keys:\n- continue\n-\n- logger.info(f\"\\nComparing {root_key}:\\n{'-' * 40}\")\n- if root_key == \"modules\":\n- for actual_row, expect_row in zip(\n- actual_combos[root_key], expected_combos[root_key]\n- ):\n- assert actual_row[\"qt_version\"] == expect_row[\"qt_version\"]\n- has_difference |= compare_modules_entry(actual_row, expect_row)\n- continue\n-\n- actual_set = to_set(actual_combos[root_key])\n- expected_set = to_set(expected_combos[root_key])\n- has_difference |= report_difference(\n- expected_set, actual_set, actual_name, root_key\n- )\n- has_difference |= report_difference(\n- actual_set, expected_set, expect_name, root_key\n- )\n-\n- return has_difference\n-\n-\n def alphabetize_modules(combos: Dict[str, Union[List[Dict], List[str]]]):\n for i, item in enumerate(combos[\"modules\"]):\n combos[\"modules\"][i][\"modules\"] = sorted(item[\"modules\"])\n \n \n def write_combinations_json(\n- combos: Dict[str, Union[List[Dict], List[str]]],\n+ combos: List[Dict[str, Union[List[Dict], List[str]]]],\n filename: Path,\n- is_use_pretty_print: bool = True,\n ):\n logger.info(f\"Write file {filename}\")\n- json_text = (\n- pretty_print_combos(combos)\n- if is_use_pretty_print\n- else json.dumps([combos], sort_keys=True, indent=2)\n- )\n+ json_text = json.dumps(combos, sort_keys=True, indent=2)\n if filename.write_text(json_text, encoding=\"utf_8\") == 0:\n raise RuntimeError(\"Failed to write file!\")\n \n \n-def main(filename: Path, is_write_file: bool) -> int:\n+def main(filename: Path, is_write_file: bool, is_verbose: bool) -> int:\n try:\n expect = json.loads(filename.read_text())\n alphabetize_modules(expect[0])\n- actual = generate_combos(new_archive=expect[0][\"new_archive\"])\n+ actual = [generate_combos(new_archive=expect[0][\"new_archive\"])]\n+ diff = Compare().check(expect, actual)\n \n- logger.info(\"=\" * 80)\n- logger.info(\"Program Output:\")\n- logger.info(pretty_print_combos(actual))\n+ if is_verbose:\n+ logger.info(\"=\" * 80)\n+ logger.info(\"Program Output:\")\n+ 
logger.info(json.dumps(actual, sort_keys=True, indent=2))\n \n- logger.info(\"=\" * 80)\n- logger.info(f\"Comparison with existing '{filename}':\")\n- diff = compare_combos(actual, expect[0], \"program_output\", str(filename))\n- logger.info(\"=\" * 80)\n+ logger.info(\"=\" * 80)\n+ logger.info(f\"Comparison with existing '{filename}':\")\n+ logger.info(json.dumps(diff, sort_keys=True, indent=2))\n+ logger.info(\"=\" * 80)\n \n- if not diff:\n- print(f\"{filename} is up to date! No PR is necessary this time!\")\n+ if diff == NO_DIFF:\n+ logger.info(f\"{filename} is up to date! No PR is necessary this time!\")\n return 0 # no difference\n if is_write_file:\n- print(f\"{filename} has changed; writing changes to file...\")\n+ logger.info(f\"{filename} has changed; writing changes to file...\")\n write_combinations_json(actual, filename)\n- return 0 # file written successfully\n+ return 0 # File written successfully\n+ logger.warning(f\"{filename} is out of date, but no changes were written\")\n return 1 # difference reported\n \n except (ArchiveConnectionError, ArchiveDownloadError) as e:\n@@ -391,8 +231,15 @@ if __name__ == \"__main__\":\n help=\"disable progress bars (makes CI logs easier to read)\",\n action=\"store_true\",\n )\n+ parser.add_argument(\n+ \"--verbose\",\n+ help=\"Print a json dump of the new file, and an abbreviated diff with the old file\",\n+ action=\"store_true\",\n+ )\n args = parser.parse_args()\n \n tqdm = get_tqdm(args.no_tqdm)\n \n- exit(main(filename=json_filename, is_write_file=args.write))\n+ exit(\n+ main(filename=json_filename, is_write_file=args.write, is_verbose=args.verbose)\n+ )\n"
}
] |
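The diff above adopts the third-party jsoncomparison package. A small usage sketch of the same Compare().check(...) call follows; the sample expected/actual documents are made up and much smaller than the real combinations.json.

    import json
    from jsoncomparison import NO_DIFF, Compare  # third-party package used by the commit

    expected = [{"versions": ["6.2.0", "6.2.1"], "new_archive": []}]
    actual = [{"versions": ["6.2.0", "6.2.2"], "new_archive": []}]

    diff = Compare().check(expected, actual)
    if diff == NO_DIFF:
        print("combinations file is up to date")
    else:
        print(json.dumps(diff, sort_keys=True, indent=2))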
b62db9ee2ab82514ab217a950dfe35829b20950a | miurahr/aqtinstall | 06.03.2022 18:16:37 | MIT License | Allow `MetadataFactory.fetch_http` to skip sha256
`MetadataFactory.fetch_http` must often download HTML pages, not
Updates.xml files. download.qt.io does not store checksums for these
files, so this particular function must be allowed to download these
pages without using a checksum. | [
{
"change_type": "MODIFY",
"old_path": "aqt/metadata.py",
"new_path": "aqt/metadata.py",
"diff": "@@ -452,7 +452,7 @@ class MetadataFactory:\n \n def fetch_extensions(self, version: Version) -> List[str]:\n versions_extensions = MetadataFactory.get_versions_extensions(\n- self.fetch_http(self.archive_id.to_url()), self.archive_id.category\n+ self.fetch_http(self.archive_id.to_url(), False), self.archive_id.category\n )\n filtered = filter(\n lambda ver_ext: ver_ext[0] == version and ver_ext[1],\n@@ -469,7 +469,7 @@ class MetadataFactory:\n return ver_ext[0]\n \n versions_extensions = MetadataFactory.get_versions_extensions(\n- self.fetch_http(self.archive_id.to_url()), self.archive_id.category\n+ self.fetch_http(self.archive_id.to_url(), False), self.archive_id.category\n )\n versions = sorted(filter(None, map(get_version, filter(filter_by, versions_extensions))))\n iterables = itertools.groupby(versions, lambda version: version.minor)\n@@ -479,7 +479,7 @@ class MetadataFactory:\n return self.fetch_versions().latest()\n \n def fetch_tools(self) -> List[str]:\n- html_doc = self.fetch_http(self.archive_id.to_url())\n+ html_doc = self.fetch_http(self.archive_id.to_url(), False)\n return list(MetadataFactory.iterate_folders(html_doc, \"tools\"))\n \n def fetch_tool_modules(self, tool_name: str) -> List[str]:\n@@ -572,9 +572,9 @@ class MetadataFactory:\n return version\n \n @staticmethod\n- def fetch_http(rest_of_url: str) -> str:\n+ def fetch_http(rest_of_url: str, is_check_hash: bool = True) -> str:\n timeout = (Settings.connection_timeout, Settings.response_timeout)\n- expected_hash = binascii.unhexlify(get_hash(rest_of_url, \"sha256\", timeout))\n+ expected_hash = binascii.unhexlify(get_hash(rest_of_url, \"sha256\", timeout)) if is_check_hash else None\n base_urls = Settings.baseurl, random.choice(Settings.fallbacks)\n for i, base_url in enumerate(base_urls):\n try:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_cli.py",
"new_path": "tests/test_cli.py",
"diff": "@@ -96,7 +96,7 @@ def test_cli_determine_qt_version(\n monkeypatch, host, target, arch, version_or_spec: str, expected_version: Version, is_bad_spec: bool\n ):\n _html = (Path(__file__).parent / \"data\" / f\"{host}-{target}.html\").read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: _html)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: _html)\n cli = Cli()\n cli._setup_settings()\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_list.py",
"new_path": "tests/test_list.py",
"diff": "@@ -132,7 +132,7 @@ def spec_regex():\n )\n def test_list_versions_tools(monkeypatch, spec_regex, os_name, target, in_file, expect_out_file):\n _html = (Path(__file__).parent / \"data\" / in_file).read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: _html)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: _html)\n \n expected = json.loads((Path(__file__).parent / \"data\" / expect_out_file).read_text(\"utf-8\"))\n \n@@ -434,7 +434,7 @@ def test_list_qt_cli(\n expect_set = expect\n assert isinstance(expect_set, set)\n \n- def _mock_fetch_http(_, rest_of_url: str) -> str:\n+ def _mock_fetch_http(_, rest_of_url, *args, **kwargs: str) -> str:\n htmltext = (Path(__file__).parent / \"data\" / htmlfile).read_text(\"utf-8\")\n if not rest_of_url.endswith(\"Updates.xml\"):\n return htmltext\n@@ -723,7 +723,7 @@ def test_list_describe_filters(meta: MetadataFactory, expect: str):\n )\n def test_list_to_version(monkeypatch, archive_id, spec, version_str, expect):\n _html = (Path(__file__).parent / \"data\" / \"mac-desktop.html\").read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: _html)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: _html)\n \n if isinstance(expect, Exception):\n with pytest.raises(CliInputError) as error:\n@@ -847,7 +847,7 @@ def test_show_list_versions(monkeypatch, capsys):\n \n def test_show_list_tools(monkeypatch, capsys):\n page = (Path(__file__).parent / \"data\" / \"mac-desktop.html\").read_text(\"utf-8\")\n- monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda self, _: page)\n+ monkeypatch.setattr(MetadataFactory, \"fetch_http\", lambda *args, **kwargs: page)\n \n expect_file = Path(__file__).parent / \"data\" / \"mac-desktop-expect.json\"\n expect = \"\\n\".join(json.loads(expect_file.read_text(\"utf-8\"))[\"tools\"]) + \"\\n\"\n@@ -918,7 +918,7 @@ def test_list_tool_cli(monkeypatch, capsys, host: str, target: str, tool_name: s\n xml_data = json.loads(xmljson)\n expected_tool_modules = set(xml_data[\"modules\"])\n \n- def _mock_fetch_http(_, rest_of_url: str) -> str:\n+ def _mock_fetch_http(_, rest_of_url, *args, **kwargs: str) -> str:\n if not rest_of_url.endswith(\"Updates.xml\"):\n return htmltext\n folder = urlparse(rest_of_url).path.split(\"/\")[-2]\n"
}
] |
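The is_check_hash flag above makes checksum verification optional because HTML directory listings on download.qt.io have no published sha256. A generic sketch of that verify-only-when-given pattern (not aqt's actual code):

    import hashlib

    def fetch(data: bytes, expected_sha256: bytes = None) -> bytes:
        """Return `data`, verifying its sha256 only when a checksum is supplied."""
        if expected_sha256 is not None:
            if hashlib.sha256(data).digest() != expected_sha256:
                raise ValueError("checksum mismatch")
        return data

    payload = b"<html>directory listing</html>"
    fetch(payload)                                    # HTML page: no checksum available
    fetch(payload, hashlib.sha256(payload).digest())  # Updates.xml case: checksum enforced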
dc45839df46282db68817df7c991b91ab48e9d11 | miurahr/aqtinstall | 20.03.2022 14:03:10 | MIT License | Fix implementation of `helper.get_hash`
Causes `get_hash` to verify the hash length and to check that the hash can
be unhexlified properly.
Fixes the interface for `get_hash` so that the caller does not have to
run `binascii.unhexlify` or verify its output. | [
{
"change_type": "MODIFY",
"old_path": "aqt/archives.py",
"new_path": "aqt/archives.py",
"diff": "@@ -19,7 +19,6 @@\n # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-import binascii\n import posixpath\n from dataclasses import dataclass, field\n from logging import getLogger\n@@ -27,7 +26,7 @@ from typing import Dict, Iterable, List, Optional, Tuple\n \n from defusedxml import ElementTree\n \n-from aqt.exceptions import ArchiveDownloadError, ArchiveListError, ChecksumDownloadFailure, NoPackageFound\n+from aqt.exceptions import ArchiveDownloadError, ArchiveListError, NoPackageFound\n from aqt.helper import Settings, get_hash, getUrl, ssplit\n from aqt.metadata import QtRepoProperty, Version\n \n@@ -231,9 +230,7 @@ class QtArchives:\n \n def _download_update_xml(self, update_xml_path):\n \"\"\"Hook for unit test.\"\"\"\n- xml_hash = binascii.unhexlify(get_hash(update_xml_path, \"sha256\", self.timeout))\n- if xml_hash == \"\":\n- raise ChecksumDownloadFailure(f\"Checksum for '{update_xml_path}' is empty\")\n+ xml_hash = get_hash(update_xml_path, \"sha256\", self.timeout)\n update_xml_text = getUrl(posixpath.join(self.base, update_xml_path), self.timeout, xml_hash)\n self.update_xml_text = update_xml_text\n \n"
},
{
"change_type": "MODIFY",
"old_path": "aqt/helper.py",
"new_path": "aqt/helper.py",
"diff": "@@ -18,7 +18,7 @@\n # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-\n+import binascii\n import configparser\n import hashlib\n import json\n@@ -176,16 +176,28 @@ def iter_list_reps(_list: List, num_reps: int) -> Generator:\n list_index = 0\n \n \n-def get_hash(archive_path: str, algorithm: str, timeout) -> str:\n+def get_hash(archive_path: str, algorithm: str, timeout) -> bytes:\n+ \"\"\"\n+ Downloads a checksum and unhexlifies it to a `bytes` object, guaranteed to be the right length.\n+ Raises ChecksumDownloadFailure if the download failed, or if the checksum was un unexpected length.\n+\n+ :param archive_path: The path to the file that we want to check, not the path to the checksum.\n+ :param algorithm: sha256 is the only safe value to use here.\n+ :param timeout: The timeout used by getUrl.\n+ :return: A checksum in `bytes`\n+ \"\"\"\n logger = getLogger(\"aqt.helper\")\n+ hash_lengths = {\"sha256\": 64, \"sha1\": 40, \"md5\": 32}\n for base_url in iter_list_reps(Settings.trusted_mirrors, Settings.max_retries_to_retrieve_hash):\n url = posixpath.join(base_url, f\"{archive_path}.{algorithm}\")\n logger.debug(f\"Attempt to download checksum at {url}\")\n try:\n r = getUrl(url, timeout)\n # sha256 & md5 files are: \"some_hash archive_filename\"\n- return r.split(\" \")[0]\n- except (ArchiveConnectionError, ArchiveDownloadError):\n+ _hash = r.split(\" \")[0]\n+ if len(_hash) == hash_lengths[algorithm]:\n+ return binascii.unhexlify(_hash)\n+ except (ArchiveConnectionError, ArchiveDownloadError, binascii.Incomplete, binascii.Error):\n pass\n filename = archive_path.split(\"/\")[-1]\n raise ChecksumDownloadFailure(\n"
},
{
"change_type": "MODIFY",
"old_path": "aqt/installer.py",
"new_path": "aqt/installer.py",
"diff": "@@ -22,7 +22,6 @@\n # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \n import argparse\n-import binascii\n import gc\n import multiprocessing\n import os\n@@ -1018,7 +1017,7 @@ def installer(\n timeout = (Settings.connection_timeout, Settings.response_timeout)\n else:\n timeout = (Settings.connection_timeout, response_timeout)\n- hash = binascii.unhexlify(get_hash(qt_package.archive_path, algorithm=\"sha256\", timeout=timeout))\n+ hash = get_hash(qt_package.archive_path, algorithm=\"sha256\", timeout=timeout)\n \n def download_bin(_base_url):\n url = posixpath.join(_base_url, qt_package.archive_path)\n"
},
{
"change_type": "MODIFY",
"old_path": "aqt/metadata.py",
"new_path": "aqt/metadata.py",
"diff": "@@ -18,7 +18,6 @@\n # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n-import binascii\n import itertools\n import operator\n import posixpath\n@@ -574,7 +573,7 @@ class MetadataFactory:\n @staticmethod\n def fetch_http(rest_of_url: str, is_check_hash: bool = True) -> str:\n timeout = (Settings.connection_timeout, Settings.response_timeout)\n- expected_hash = binascii.unhexlify(get_hash(rest_of_url, \"sha256\", timeout)) if is_check_hash else None\n+ expected_hash = get_hash(rest_of_url, \"sha256\", timeout) if is_check_hash else None\n base_urls = Settings.baseurl, random.choice(Settings.fallbacks)\n for i, base_url in enumerate(base_urls):\n try:\n"
}
] |
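The new get_hash behaviour parses a "hexdigest filename" checksum file, validates the digest length per algorithm, and unhexlifies it before returning. A standalone sketch of that parsing step, using the well-known sha256 of the empty string as sample input:

    import binascii

    HASH_LENGTHS = {"sha256": 64, "sha1": 40, "md5": 32}

    def parse_checksum_file(text: str, algorithm: str) -> bytes:
        """Turn 'hexdigest filename' content into raw bytes, validating the length."""
        hex_digest = text.split(" ")[0]
        if len(hex_digest) != HASH_LENGTHS[algorithm]:
            raise ValueError("unexpected %s digest length: %d" % (algorithm, len(hex_digest)))
        return binascii.unhexlify(hex_digest)

    sample = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855  Updates.xml"
    print(len(parse_checksum_file(sample, "sha256")))  # 32 raw bytes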
ed6cf7696871e01685c1c96d6d13deb6439ae490 | miurahr/aqtinstall | 24.07.2022 00:30:16 | MIT License | Add CLI option to install desktop Qt automatically
This change also causes aqt to emit a warning when the option is not
turned on and the expected desktop Qt is not found. | [
{
"change_type": "MODIFY",
"old_path": "aqt/installer.py",
"new_path": "aqt/installer.py",
"diff": "@@ -299,6 +299,7 @@ class Cli:\n if modules is not None and archives is not None:\n archives.append(modules)\n nopatch = args.noarchives or (archives is not None and \"qtbase\" not in archives) # type: bool\n+ warn_on_missing_desktop_qt: bool = not args.autodesktop\n if not self._check_qt_arg_versions(qt_version):\n self.logger.warning(\"Specified Qt version is unknown: {}.\".format(qt_version))\n if not self._check_qt_arg_combination(qt_version, os_name, target, arch):\n@@ -328,6 +329,7 @@ class Cli:\n with TemporaryDirectory() as temp_dir:\n _archive_dest = Cli.choose_archive_dest(archive_dest, keep, temp_dir)\n run_installer(qt_archives.get_packages(), base_dir, sevenzip, keep, _archive_dest)\n+ self._handle_missing_desktop_qt(os_name, target, Version(qt_version), Path(base_dir), warn_on_missing_desktop_qt)\n if not nopatch:\n Updater.update(target_config, base_dir)\n self.logger.info(\"Finished installation\")\n@@ -603,6 +605,12 @@ class Cli:\n action=\"store_true\",\n help=\"No base packages; allow mod amendment with --modules option.\",\n )\n+ install_qt_parser.add_argument(\n+ \"--autodesktop\",\n+ action=\"store_true\",\n+ help=\"For android/ios installations, a desktop Qt installation is required. \"\n+ \"When enabled, this option installs the required desktop version automatically.\",\n+ )\n \n def _set_install_tool_parser(self, install_tool_parser, *, is_legacy: bool):\n install_tool_parser.set_defaults(func=self.run_install_tool, is_legacy=is_legacy)\n@@ -640,6 +648,43 @@ class Cli:\n f\"In the future, please omit this parameter.\"\n )\n \n+ @staticmethod\n+ def _get_missing_desktop_arch(host: str, target: str, version: Version, base_dir: Path) -> Optional[str]:\n+ \"\"\"\n+ For mobile Qt installations, the desktop version of Qt is a dependency.\n+ If the desktop version is not installed, this function returns the architecture that should be installed.\n+ If no desktop Qt is required, or it is already installed, this function returns None.\n+ \"\"\"\n+ if target not in [\"ios\", \"android\"]:\n+ return None\n+ if host != \"windows\":\n+ arch = aqt.updater.default_desktop_arch_dir(host, version)\n+ expected_qmake = base_dir / format(version) / arch / \"bin/qmake\"\n+ return arch if not expected_qmake.is_file() else None\n+ else:\n+ existing_desktop_qt = QtRepoProperty.find_installed_qt_mingw_dir(base_dir / format(version))\n+ if existing_desktop_qt:\n+ return None\n+ return MetadataFactory(ArchiveId(\"qt\", host, \"desktop\")).fetch_default_desktop_arch(version)\n+\n+ def _handle_missing_desktop_qt(self, host: str, target: str, version: Version, base_dir: Path, should_warn: bool):\n+ missing_desktop_arch = Cli._get_missing_desktop_arch(host, target, version, base_dir)\n+ if not missing_desktop_arch:\n+ return\n+\n+ msg_prefix = (\n+ f\"You are installing the {target} version of Qt, which requires that the desktop version of Qt \"\n+ f\"is also installed.\"\n+ )\n+ if should_warn:\n+ self.logger.warning(\n+ f\"{msg_prefix} You can install it with the following command:\\n\"\n+ f\" `aqt install-qt {host} desktop {version} {missing_desktop_arch}`\"\n+ )\n+ else:\n+ self.logger.info(f\"{msg_prefix} Now installing Qt: desktop {version} {missing_desktop_arch}\")\n+ self.run([\"install-qt\", host, \"desktop\", format(version), missing_desktop_arch])\n+\n def _make_all_parsers(self, subparsers: argparse._SubParsersAction):\n deprecated_msg = \"This command is deprecated and marked for removal in a future version of aqt.\"\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tests/test_install.py",
"new_path": "tests/test_install.py",
"diff": "@@ -576,6 +576,9 @@ def tool_archive(host: str, tool_name: str, variant: str, date: datetime = datet\n r\"^INFO : aqtinstall\\(aqt\\) v.* on Python 3.*\\n\"\n r\"INFO : Downloading qtbase...\\n\"\n r\"Finished installation of qtbase-windows-android_armv7.7z in .*\\n\"\n+ r\"WARNING : You are installing the android version of Qt, which requires that the desktop version of \"\n+ r\"Qt is also installed. You can install it with the following command:\\n\"\n+ r\" `aqt install-qt windows desktop 6.1.0 MINGW_MOCK_DEFAULT`\\n\"\n r\"INFO : Patching .*6\\.1\\.0[/\\\\]android_armv7[/\\\\]bin[/\\\\]qmake.bat\\n\"\n r\"INFO : Finished installation\\n\"\n r\"INFO : Time elapsed: .* second\"\n@@ -631,6 +634,9 @@ def tool_archive(host: str, tool_name: str, variant: str, date: datetime = datet\n r\"^INFO : aqtinstall\\(aqt\\) v.* on Python 3.*\\n\"\n r\"INFO : Downloading qtbase...\\n\"\n r\"Finished installation of qtbase-linux-android_arm64_v8a.7z in .*\\n\"\n+ r\"WARNING : You are installing the android version of Qt, which requires that the desktop version of \"\n+ r\"Qt is also installed. You can install it with the following command:\\n\"\n+ r\" `aqt install-qt linux desktop 6\\.3\\.0 gcc_64`\\n\"\n r\"INFO : Patching .*6\\.3\\.0[/\\\\]android_arm64_v8a[/\\\\]bin[/\\\\]qmake\\n\"\n r\"INFO : Finished installation\\n\"\n r\"INFO : Time elapsed: .* second\"\n@@ -686,6 +692,9 @@ def tool_archive(host: str, tool_name: str, variant: str, date: datetime = datet\n r\"^INFO : aqtinstall\\(aqt\\) v.* on Python 3.*\\n\"\n r\"INFO : Downloading qtbase...\\n\"\n r\"Finished installation of qtbase-mac-ios.7z in .*\\n\"\n+ r\"WARNING : You are installing the ios version of Qt, which requires that the desktop version of Qt is \"\n+ r\"also installed. You can install it with the following command:\\n\"\n+ r\" `aqt install-qt mac desktop 6\\.1\\.2 macos`\\n\"\n r\"INFO : Patching .*6\\.1\\.2[/\\\\]ios[/\\\\]bin[/\\\\]qmake\\n\"\n r\"INFO : Finished installation\\n\"\n r\"INFO : Time elapsed: .* second\"\n@@ -716,6 +725,7 @@ def test_install(\n monkeypatch.setattr(\"aqt.archives.getUrl\", mock_get_url)\n monkeypatch.setattr(\"aqt.helper.getUrl\", mock_get_url)\n monkeypatch.setattr(\"aqt.installer.downloadBinaryFile\", mock_download_archive)\n+ monkeypatch.setattr(\"aqt.metadata.MetadataFactory.fetch_default_desktop_arch\", lambda *args: \"MINGW_MOCK_DEFAULT\")\n \n with TemporaryDirectory() as output_dir:\n cli = Cli()\n"
}
] |
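The autodesktop change in the record above boils down to a small filesystem check plus a metadata lookup. Purely as an illustration (not aqtinstall's actual code), a hedged Python sketch of the idea follows; the host-to-architecture mapping and directory layout used here are simplifying assumptions.

from pathlib import Path
from typing import Optional

def missing_desktop_arch(host: str, target: str, version: str, base_dir: Path) -> Optional[str]:
    """Return the desktop arch that still needs installing, or None if it is not
    needed or already present. Simplified sketch; the real tool also queries
    repository metadata to pick the default Windows architecture."""
    if target not in ("ios", "android"):
        return None  # desktop Qt is only a dependency for mobile targets
    # Assumed default desktop arch directory per host (illustrative only).
    default_arch = {"mac": "macos", "linux": "gcc_64", "windows": "mingw_w64"}.get(host)
    if default_arch is None:
        return None
    qmake = base_dir / version / default_arch / "bin" / "qmake"
    return None if qmake.is_file() else default_arch

# Example: decide whether to warn or auto-install.
arch = missing_desktop_arch("linux", "android", "6.3.0", Path("./Qt"))
if arch:
    print(f"Desktop Qt missing; e.g. run: aqt install-qt linux desktop 6.3.0 {arch}")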
75a13309377844662d8be00fe1fbf9b6112e9db9 | projectcalico/calicoctl | 28.03.2017 15:43:20 | Apache License 2.0 | Add test to simulate GCE instance setup
This change adds a test that sets up hosts' addressing and routing as it
is on a GCE instance, and checks that we still get connectivity between
workloads on different hosts. | [
{
"change_type": "MODIFY",
"old_path": "tests/st/policy/test_profile.py",
"new_path": "tests/st/policy/test_profile.py",
"diff": "@@ -30,7 +30,8 @@ POST_DOCKER_COMMANDS = [\"docker load -i /code/calico-node.tar\",\n class MultiHostMainline(TestBase):\n @parameterized.expand([\n #\"tags\",\n- \"rules.tags\",\n+ (\"rules.tags\", False),\n+ (\"rules.tags\", True),\n #\"rules.protocol.icmp\",\n #\"rules.ip.addr\",\n #\"rules.ip.net\",\n@@ -38,7 +39,7 @@ class MultiHostMainline(TestBase):\n #\"rules.tcp.port\",\n #\"rules.udp.port\",\n ])\n- def test_multi_host(self, test_type):\n+ def test_multi_host(self, test_type, simulate_gce_routing):\n \"\"\"\n Run a mainline multi-host test.\n Because multihost tests are slow to setup, this tests most mainline\n@@ -58,13 +59,15 @@ class MultiHostMainline(TestBase):\n with DockerHost(\"host1\",\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,\n post_docker_commands=POST_DOCKER_COMMANDS,\n+ simulate_gce_routing=simulate_gce_routing,\n start_calico=False) as host1, \\\n DockerHost(\"host2\",\n additional_docker_options=CLUSTER_STORE_DOCKER_OPTIONS,\n post_docker_commands=POST_DOCKER_COMMANDS,\n+ simulate_gce_routing=simulate_gce_routing,\n start_calico=False) as host2:\n (n1_workloads, n2_workloads, networks) = \\\n- self._setup_workloads(host1, host2)\n+ self._setup_workloads(host1, host2, simulate_gce_routing)\n \n # Get the original profiles:\n output = host1.calicoctl(\"get profile -o yaml\")\n@@ -246,11 +249,26 @@ class MultiHostMainline(TestBase):\n yaml.dump(new_profile, default_flow_style=False))\n host.calicoctl(\"apply -f new_profiles\")\n \n- def _setup_workloads(self, host1, host2):\n+ def _setup_workloads(self, host1, host2, simulate_gce_routing):\n # TODO work IPv6 into this test too\n host1.start_calico_node()\n host2.start_calico_node()\n \n+ if simulate_gce_routing:\n+ # We are simulating GCE instance routing, where there is a router\n+ # between the instances, and each instance has a /32 address that\n+ # appears not to be directly connected to any subnet. Hence we\n+ # need to enable IP-in-IP to get from one host to the other.\n+ for host in [host1, host2]:\n+ pools_output = host.calicoctl(\"get ippool -o yaml\")\n+ pools_dict = yaml.safe_load(pools_output)\n+ for pool in pools_dict:\n+ print \"Pool is %s\" % pool\n+ if ':' not in pool['metadata']['cidr']:\n+ pool['spec']['ipip'] = {'mode': 'always', 'enabled': True}\n+ host.writefile(\"ippools.yaml\", pools_dict)\n+ host.calicoctl(\"apply -f ippools.yaml\")\n+\n # Create the networks on host1, but it should be usable from all\n # hosts. We create one network using the default driver, and the\n # other using the Calico driver.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/st/utils/docker_host.py",
"new_path": "tests/st/utils/docker_host.py",
"diff": "@@ -70,6 +70,7 @@ class DockerHost(object):\n post_docker_commands=[\"docker load -i /code/calico-node.tar\",\n \"docker load -i /code/busybox.tar\"],\n calico_node_autodetect_ip=False,\n+ simulate_gce_routing=False,\n override_hostname=False):\n self.name = name\n self.dind = dind\n@@ -127,6 +128,33 @@ class DockerHost(object):\n docker_ps = partial(self.execute, \"docker ps\")\n retry_until_success(docker_ps, ex_class=CalledProcessError,\n retries=10)\n+\n+ if simulate_gce_routing:\n+ # Simulate addressing and routing setup as on a GCE instance:\n+ # the instance has a /32 address (which means that it appears\n+ # not to be directly connected to anything) and a default route\n+ # that does not have the 'onlink' flag to override that.\n+ #\n+ # First check that we can ping the Docker bridge, and trace out\n+ # initial state.\n+ self.execute(\"ping -c 1 -W 2 172.17.0.1\")\n+ self.execute(\"ip a\")\n+ self.execute(\"ip r\")\n+\n+ # Change the normal /16 IP address to /32.\n+ self.execute(\"ip a del %s/16 dev eth0\" % self.ip)\n+ self.execute(\"ip a add %s/32 dev eth0\" % self.ip)\n+\n+ # Add a default route via the Docker bridge.\n+ self.execute(\"ip r a 172.17.0.1 dev eth0\")\n+ self.execute(\"ip r a default via 172.17.0.1 dev eth0\")\n+\n+ # Trace out final state, and check that we can still ping the\n+ # Docker bridge.\n+ self.execute(\"ip a\")\n+ self.execute(\"ip r\")\n+ self.execute(\"ping -c 1 -W 2 172.17.0.1\")\n+\n for command in post_docker_commands:\n self.execute(command)\n elif not calico_node_autodetect_ip:\n"
}
] |
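For context, the GCE simulation in the record above amounts to four `ip` commands run on each host. A hedged Python sketch of the same steps outside the DockerHost harness is shown below; the interface name and the Docker bridge address 172.17.0.1 are taken from the diff, and the function assumes root privileges.

import subprocess

def simulate_gce_routing(host_ip: str, iface: str = "eth0", bridge: str = "172.17.0.1") -> None:
    """Give the host a /32 address and route everything via the Docker bridge,
    mimicking GCE instance networking as in the test above."""
    for cmd in (
        f"ip addr del {host_ip}/16 dev {iface}",       # drop the directly-connected /16
        f"ip addr add {host_ip}/32 dev {iface}",       # re-add the address as /32
        f"ip route add {bridge} dev {iface}",          # make the bridge reachable again
        f"ip route add default via {bridge} dev {iface}",
    ):
        subprocess.run(cmd.split(), check=True)

# Example (disruptive; only run inside a disposable container or VM):
# simulate_gce_routing("172.17.0.5")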
0f210ba5b311e2c25b02b8123c0805a0bc872c17 | openshift/openshift-tools | 13.01.2017 15:36:51 | Apache License 2.0 | Allow tests to be run on demand
With this change, a string "[test]" can be added as a comment to a pull
request by a whitelisted user to initiate testing of that pull request. | [
{
"change_type": "MODIFY",
"old_path": "jenkins/Jenkinsfile",
"new_path": "jenkins/Jenkinsfile",
"diff": "@@ -2,22 +2,49 @@\n \n node {\n stage \"Check PR Action\"\n+ // Parse json payload\n+ def test_key = \"[test]\"\n def slurper = new groovy.json.JsonSlurper()\n def webhook = slurper.parseText(payload)\n def trigger = \"\"\n+\n // The following are the actions we should test:\n // \"opened\", \"reopened\", \"synchronize\"\n // We should build if the action is \"closed\" and the \"merged\" flag is true\n // The \"edited\" action is when a comment is edited, we don't care about that.\n- echo webhook.action\n- if (webhook.action == \"opened\" || webhook.action == \"reopened\" || webhook.action == \"synchronize\") {\n+\n+ // Additionally, to support re-testing via a comment made on the PR, we should test\n+ // on the following issue-only actions:\n+ // created, edited\n+ // These will requier some additional verification, as tests should only commence if\n+ // the comment was made on an open pull request and includes a certain phrase.\n+\n+ def action = webhook.action\n+ echo \"Webhook payload action: ${action}\"\n+ if (action == \"opened\" || action == \"reopened\" || action == \"synchronize\") {\n+ echo \"Pull request has been opened or modified, testing...\"\n trigger = \"test\"\n- } else if (webhook.action == \"closed\" && webhook.pull_request.merged) {\n+ } else if (action == \"closed\" && webhook.pull_request.merged) {\n+ echo \"Pull request has been merged, running builds...\"\n trigger = \"build\"\n+ } else if (action == \"created\" || action == \"edited\") {\n+ if (webhook.issue && webhook.issue.containsKey(\"pull_request\")) {\n+ body = webhook.comment.body\n+ if (body.toLowerCase().contains(test_key)) {\n+ echo \"Pull request comment contains '${test_key}', running tests...\"\n+ trigger = \"test\"\n+ } else {\n+ echo \"Pull request comment does not contain '${test_key}'. Ignoring...\"\n+ }\n+ } else {\n+ echo \"Comment made on issue, not pull request. Ignoring...\"\n+ }\n }\n+ echo \"Trigger: ${trigger}\"\n // These variables must be nullified as they are not serializable\n // See http://stackoverflow.com/questions/37864542/jenkins-pipeline-notserializableexception-groovy-json-internal-lazymap\n slurper = null\n+ webhook = null\n \n if (trigger != \"\") {\n if (trigger == \"test\") {\n@@ -28,15 +55,15 @@ node {\n stage \"Build RPMS\"\n echo \"Starting Build\"\n // TODO this is not yet implemented\n-\t\t\t\t\t\tstage \"Deploy Updates\"\n-\t\t\t\t\t\t\t\techo \"Deploying updated RPMs\"\n+ stage \"Deploy Updates\"\n+ echo \"Deploying updated RPMs\"\n // TODO this is not yet implemented\n } else {\n echo \"Trigger ${trigger} not recognized\"\n currentBuild.result = 'FAILURE'\n }\n } else {\n- echo \"Pull request action, ${webhook.action}, does not justify running any jobs.\"\n+ echo \"Webhook action, ${action}, does not justify running any jobs.\"\n currentBuild.result = 'SUCCESS'\n }\n }\n"
},
{
"change_type": "MODIFY",
"old_path": "jenkins/test/run_tests.py",
"new_path": "jenkins/test/run_tests.py",
"diff": "@@ -39,8 +39,12 @@ EXCLUDES = [\n \"common.py\",\n \".pylintrc\"\n ]\n+# The relative path to the testing validator scripts\n VALIDATOR_PATH = \"jenkins/test/validators/\"\n+# The github API base url\n GITHUB_API_URL = \"https://api.github.com\"\n+# The string to accept in PR comments to initiate testing by a whitelisted user\n+TEST_STRING = \"[test]\"\n \n def run_cli_cmd(cmd, exit_on_fail=True):\n '''Run a command and return its output'''\n@@ -161,17 +165,67 @@ def get_github_credentials():\n token_file.close()\n return username, token\n \n-def get_user_whitelist():\n- \"\"\" Get the user whitelist for testing from mounted secret volume \"\"\"\n+def check_user_whitelist(payload):\n+ \"\"\" Get and check the user whitelist for testing from mounted secret volume \"\"\"\n+ # Get user from payload\n+ user = \"\"\n+ comment_made = False\n+ if \"pull_request\" in payload:\n+ user = payload[\"pull_request\"][\"user\"][\"login\"]\n+ elif \"comment\" in payload:\n+ user = payload[\"comment\"][\"user\"][\"login\"]\n+ comment_made = True\n+ else:\n+ print \"Webhook payload does not include pull request user or issue comment user data\"\n+ sys.exit(1)\n+\n+ if comment_made:\n+ body = payload[\"comment\"][\"body\"]\n+ if not \"[test]\" in body.split(\" \"):\n+ print \"Pull request coment does not include test string \\\"\" + TEST_STRING +\"\\\"\"\n+ # Exit success here so that the jenkins job is marked as a success,\n+ # since no actual error occurred, the expected has happened\n+ sys.exit(0)\n+\n+ # Get secret informatino from env variable\n secret_dir = os.getenv(\"WHITELIST_SECRET_DIR\")\n if secret_dir == \"\":\n print \"ERROR: $WHITELIST_SECRET_DIR undefined. This variable should exist and\" + \\\n \" should point to the mounted volume containing the admin whitelist\"\n sys.exit(2)\n+ # Extract whitelist from secret volume\n whitelist_file = open(os.path.join(\"/\", secret_dir, \"whitelist\"), \"r\")\n whitelist = whitelist_file.read()\n whitelist_file.close()\n- return whitelist\n+ if whitelist == \"\" or user not in whitelist.split(\",\"):\n+ print \"WARN: User \" + user + \" not in admin whitelist.\"\n+ # exit success here so that the jenkins job is marked as a success,\n+ # since no actual error occured, the expected has happened\n+ sys.exit(0)\n+\n+def get_pull_request_info(payload):\n+ \"\"\" Get the relevant pull request details for this webhook payload \"\"\"\n+ if \"pull_request\" in payload:\n+ return payload[\"pull_request\"]\n+\n+ if not \"issue\" in payload:\n+ print \"Webhook payload does not include pull request or issue data\"\n+ sys.exit(1)\n+ if not \"pull_request\" in payload[\"issue\"]:\n+ print \"Webhook payload is for an issue comment, not pull request.\"\n+ sys.exit(1)\n+\n+ pull_request_url = payload[\"issue\"][\"pull_request\"][\"url\"]\n+ response = requests.get(pull_request_url)\n+ response.raise_for_status()\n+ pull_request_json = response.text\n+ try:\n+ pull_request = json.loads(pull_request_json, parse_int=str, parse_float=str)\n+ except ValueError as error:\n+ print \"Unable to load JSON data from \" + pull_request_url\n+ print error\n+ sys.exit(1)\n+ return pull_request\n \n def main():\n \"\"\" Get the payload, merge changes, assign env, and run validators \"\"\"\n@@ -186,16 +240,12 @@ def main():\n print \"Unable to load JSON data from $GITHUB_WEBHOOK_PAYLOAD:\"\n print error\n sys.exit(1)\n- pull_request = payload[\"pull_request\"]\n \n # Check to ensure the user submitting the changes is on the whitelist\n- user = pull_request[\"user\"][\"login\"]\n- 
whitelist = get_user_whitelist()\n- if whitelist == \"\" or user not in whitelist.split(\",\"):\n- print \"WARN: User \" + user + \" not in admin whitelist.\"\n- # exit success here so that the jenkins job is marked as a success,\n- # since no actual error occured\n- sys.exit(0)\n+ check_user_whitelist(payload)\n+\n+ # Extract or get the pull request information from the payload\n+ pull_request = get_pull_request_info(payload)\n \n remote_sha = pull_request[\"head\"][\"sha\"]\n pull_id = pull_request[\"number\"]\n"
}
] |
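The trigger selection in the record above is implemented in Groovy; as an illustration only, the same decision can be sketched in Python. The payload keys mirror GitHub's pull_request and issue_comment webhook events, and the "[test]" keyword check follows the Jenkinsfile.

TEST_KEY = "[test]"

def decide_trigger(payload: dict) -> str:
    """Return 'test', 'build' or '' for a GitHub webhook payload (simplified sketch)."""
    action = payload.get("action", "")
    if action in ("opened", "reopened", "synchronize"):
        return "test"
    if action == "closed" and payload.get("pull_request", {}).get("merged"):
        return "build"
    if action in ("created", "edited"):
        # Comments only count if made on a pull request and containing the keyword.
        is_pr_comment = "pull_request" in payload.get("issue", {})
        body = payload.get("comment", {}).get("body", "")
        if is_pr_comment and TEST_KEY in body.lower():
            return "test"
    return ""

# Example:
print(decide_trigger({"action": "created",
                      "issue": {"pull_request": {"url": "..."}},
                      "comment": {"body": "please [test] this"}}))  # -> test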
a72eda6c40cf000f172d7e11dd5a4b67fac855a0 | openshift/openshift-tools | 14.02.2017 17:49:29 | Apache License 2.0 | Refactor to update pull requests with pending status
This commit refactors code to update the pull request with a pending
status at the earliest possible time.
Additionally, this refactor fixes an issue where unexpected
characters in the pull request title or body caused failures when
parsing the github webhook payload json. | [
{
"change_type": "MODIFY",
"old_path": "jenkins/Jenkinsfile",
"new_path": "jenkins/Jenkinsfile",
"diff": "@@ -5,8 +5,10 @@ node {\n // Parse json payload\n def test_key = \"[test]\"\n def slurper = new groovy.json.JsonSlurper()\n+ def jout = new groovy.json.JsonOutput()\n def webhook = slurper.parseText(payload)\n- def trigger = \"\"\n+ def trigger\n+ def pull_request\n \n // The following are the actions we should test:\n // \"opened\", \"reopened\", \"synchronize\"\n@@ -19,19 +21,25 @@ node {\n // These will requier some additional verification, as tests should only commence if\n // the comment was made on an open pull request and includes a certain phrase.\n \n+ // The following block determines the action (trigger) to take depending on the action field in the webhook\n def action = webhook.action\n echo \"Webhook payload action: ${action}\"\n if (action == \"opened\" || action == \"reopened\" || action == \"synchronize\") {\n echo \"Pull request has been opened or modified, testing...\"\n+ pull_request = webhook.pull_request\n trigger = \"test\"\n } else if (action == \"closed\" && webhook.pull_request.merged) {\n echo \"Pull request has been merged, running builds...\"\n+ pull_request = webhook.pull_request\n trigger = \"build\"\n } else if (action == \"created\" || action == \"edited\") {\n if (webhook.issue && webhook.issue.containsKey(\"pull_request\")) {\n body = webhook.comment.body\n if (body.toLowerCase().contains(test_key)) {\n echo \"Pull request comment contains '${test_key}', running tests...\"\n+ // The webhook only contains the comment and issue details. We need to get the pull request\n+ pr_json = get_pr(webhook.issue.pull_request.url)\n+ pull_request = slurper.parseText(pr_json)\n trigger = \"test\"\n } else {\n echo \"Pull request comment does not contain '${test_key}'. Ignoring...\"\n@@ -41,16 +49,33 @@ node {\n }\n }\n echo \"Trigger: ${trigger}\"\n+\n+ // Unset the pull request title and body in the pull request json and extract them into their own variables\n+ // This is done to avoid possible issues in parsing the json when unexpected characters are used in the title and body\n+ title = pull_request.title\n+ body = pull_request.body\n+ pull_request.title = \"\"\n+ pull_request.body = \"\"\n+\n+ // Get sha and repo to submit pr update\n+ sha = pull_request.head.sha\n+ repo = pull_request.base.repo.full_name\n+\n+ // Get the json string representation of the updated pull request data to pass to openshift\n+ pull_request_string = jout.toJson(pull_request)\n+\n // These variables must be nullified as they are not serializable\n // See http://stackoverflow.com/questions/37864542/jenkins-pipeline-notserializableexception-groovy-json-internal-lazymap\n slurper = null\n+ jout = null\n webhook = null\n+ pull_request = null\n \n+ // From the determined trigger, initiate tests or builds\n if (trigger != \"\") {\n if (trigger == \"test\") {\n stage \"Test Changes\"\n- echo \"Starting Tests\"\n- openshiftBuild buildConfig: 'openshift-tools-test', env: [[ name: 'GITHUB_WEBHOOK_PAYLOAD', value: payload ], [ name: 'BUILD_URL', value: env.BUILD_URL ]], showBuildLogs: true\n+ run_tests(pull_request_string, sha, repo)\n } else if (trigger == \"build\") {\n stage \"Build RPMS\"\n echo \"Starting Build\"\n@@ -67,3 +92,59 @@ node {\n currentBuild.result = 'SUCCESS'\n }\n }\n+\n+// Run pull request testing\n+def run_tests(pull_request_string, sha, repo) {\n+ echo \"Starting Tests\"\n+ update_pr_status(\"pending\", \"Automated tests in progress\", sha, repo)\n+ // TODO we could probably handle failures here a bit better\n+ // We should do a try/catch so that when the build fails, we 
update the PR with a failure statues.\n+ // If we do that, we need to make sure that the script only ever exits with a non-zero exit code when\n+ // a failure occurred and the PR was NOT updated with a failure status.\n+ try {\n+ openshiftBuild buildConfig: 'openshift-tools-test', env: [[ name: 'PULL_REQUEST', value: pull_request_string ], [ name: 'BUILD_URL', value: env.BUILD_URL ]], showBuildLogs: true\n+ } catch(Exception e) {\n+ println \"Build failed in a way that likely didn't update PR status!\"\n+ update_pr_status(\"failure\", \"Automated tests failed to run\", sha, repo)\n+ currentBuild.result = 'FAILURE'\n+ }\n+}\n+\n+// Update the status of a pull request\n+def update_pr_status(state, text, sha, repo) {\n+ String token = new File('/openshift-ops-bot/token').text\n+ \n+ def target_url = env.BUILD_URL\n+ def urlString = \"https://api.github.com/repos/\" + repo + \"/statuses/\" + sha\n+ def url = new URL(urlString)\n+ def conn = url.openConnection()\n+ \n+ conn.setRequestMethod(\"POST\")\n+ conn.setRequestProperty(\"Content-Type\", \"application/json\")\n+ conn.setRequestProperty(\"Authorization\", \"token \" + token)\n+ def data = '{\"state\": \"' + state + '\", \"description\": \"' + text + '\", \"target_url\": \"' +\n+ target_url + '\", \"context\": \"jenkins-ci\"}'\n+ \n+ println \"Updating ${sha} in github with state ${state} and description ${text}...\" \n+ conn.doOutput = true\n+ def writer = new OutputStreamWriter(conn.outputStream)\n+ writer.write(data)\n+ writer.flush()\n+ writer.close()\n+ conn.connect()\n+ println \"Posted PR status update with response: ${conn.responseCode} ${conn.responseMessage}\"\n+}\n+\n+// Get a pull request\n+def get_pr(urlString) {\n+ String token = new File('/openshift-ops-bot/token').text\n+ def url = new URL(urlString)\n+ def conn = url.openConnection()\n+\n+ conn.setRequestMethod(\"GET\")\n+ conn.setRequestProperty(\"Authorization\", \"token \" + token)\n+ conn.connect()\n+\n+ assert conn.responseCode == 200\n+ return conn.content.text\n+}\n"
},
{
"change_type": "MODIFY",
"old_path": "jenkins/README.md",
"new_path": "jenkins/README.md",
"diff": "@@ -27,26 +27,32 @@ The script will then submit a comment to the pull request indiciating whether al\n \n 2. Deploy the builds and secrets in the openshift environment using the `openshift-tools-pr-automation-template.json` in this directory. A username and oauth token for a github user will be required. The user and org whitelists will need to be specified with at least one user or org that will be allowed to run tests.\n \n-3. Log into the jenkins instance using the default credentials. Navigate to 'Manage Jenkins' > 'Manage Users' and click the 'config' icon for the admin user. Change the admin users password to something much more secure.\n+3. Add the openshift-ops-bot secret to the jenkins deployment to allow the jenkins pipeline access to the bots credentials. The pipeline needs this data to be mounted at /openshift-ops-bot to post the initial \"tests pending\" status update to pull requests:\n+```\n+oc set volumes dc/jenkins --add -t secret --secret-name=openshift-ops-bot-secret -m /openshift-ops-bot\n+```\n \n-4. In jenkins, navigate to 'Manage Jenkins' > 'Configure Global Security'. Under 'Access Control' > 'Authorization', the radio button 'Matrix-based security' should be checked by default. In the matrix, select 'Read' access for the Anonymous group under 'Job'. Additionally, select 'Build' for the Anonymous group under 'Job'. This will allow github to post to jenkins via webhooks.\n+4. Log into the jenkins instance using the default credentials. Navigate to 'Manage Jenkins' > 'Manage Users' and click the 'config' icon for the admin user. Change the admin users password to something much more secure.\n \n-5. Due to a [bug in jenkins](https://issues.jenkins-ci.org/browse/JENKINS-28466), it is necessary to navigate to 'Manage Jenkins' > 'Configure System' and hit 'Save'. If this is not done, certain environment variables, such as `BUILD_URL` will not be available to jenkins pipeline builds.\n+5. In jenkins, navigate to 'Manage Jenkins' > 'Configure Global Security'. Under 'Access Control' > 'Authorization', the radio button 'Matrix-based security' should be checked by default. In the matrix, select 'Read' access for the Anonymous group under 'Job'. Additionally, select 'Build' for the Anonymous group under 'Job'. This will allow github to post to jenkins via webhooks.\n \n-6. In Jenkins, create a new jenkins pipeline job. Configure the following:\n+6. Due to a [bug in jenkins](https://issues.jenkins-ci.org/browse/JENKINS-28466), it is necessary to navigate to 'Manage Jenkins' > 'Configure System' and hit 'Save'. If this is not done, certain environment variables, such as `BUILD_URL` will not be available to jenkins pipeline builds.\n+\n+7. In Jenkins, create a new jenkins pipeline job. Configure the following:\n 1. Check the 'This project is parameterized' box and add a 'String Parameter' with the Name 'payload' (case-sensitive). Leave all other boxes empty.\n 2. Under 'Build Triggers', check the 'Trigger builds remotely' checkbox and specify any string to use as the authorization token. This same token will be used later to configure the github webhook.\n 3. Under 'Pipeline', you can either have jenkins pull the pipeline instructions Jenkinsfile from github, or you can copy and paste the Jenkinsfile contents from `jenkins/test/Jenkinsfile` into the text box. Grabbing the Jenkinsfile from Github will add 10-20 seconds to the build time. Select 'Pipeline script from SCM' as the pipeline definition. 
Choose 'Git' as the SCM and specify `https://github.com/openshift/openshift-tools` as the repository url. Leave the 'Branches to build' blank. For the 'Script path', set to 'jenkins/Jenkinsfile'\n- 4. Save the job\n+ 4. Uncheck the \"use groovy sandbox\" checkbox. The mounted secret volume cannot be accessed by the pipeline from within the sandbox.\n+ 5. Save the job\n \n-7. In github, navigate to the settings for openshift-tools with administrator permissions. Under webhooks, create a new webhook and configure the following:\n+8. In github, navigate to the settings for openshift-tools with administrator permissions. Under webhooks, create a new webhook and configure the following:\n 1. The Payload URL will be the url of the jenkins build trigger configured earlier. Here is an example where 'someuniquestring' is specified as the build trigger token: `https://jenkins-exampleproject.example.com/job/job_name/buildWithParameters?token=someuniquestring`\n 2. Set the 'Content type' to `application/x-www-form-urlencoded`. This enabled the webhook payload to be sent as a parameter to the jenkins job.\n 3. Under \"Which events would you like to trigger\", select only 'Pull request'.\n 4. Check the 'Active' box to ensure the github webhook is active\n 5. Hit 'Update webhook' to save the changes.\n \n-8. Ensure that the github user used to update pull requests has push permissions to the repository. As an adiministrator of the repository, navigate to 'Settings' > 'Collaborators' to invite the user to have 'Write' permissions.\n+9. Ensure that the github user used to update pull requests has push permissions to the repository. As an adiministrator of the repository, navigate to 'Settings' > 'Collaborators' to invite the user to have 'Write' permissions.\n \n ## Possible Gotchas\n - The `test/run_tests.py` file is the only file that is not dynamically updated with changes. This means that if you modify `test/run_tests.py` in a pull request, those changes will not take affect during testing until the changes have been merged. This occurs because `test/run_tests.py` is the entrypoint to testing that pulls in the changes. Something must run to pull in the changes made in a pull request that will be unaffected by the pull request. The testing currently runs from the 'stg' branch to help with this. Once the commit is merged to stg, the pull request submitting the changes to the prod branch will run with the updated `test/run_tests.py` to allow for proper verification.\n"
},
{
"change_type": "MODIFY",
"old_path": "jenkins/test/run_tests.py",
"new_path": "jenkins/test/run_tests.py",
"diff": "@@ -1,13 +1,16 @@\n \"\"\" Run all tests for openshift-tools repository \"\"\"\n-# This script expects a single environment variable to be defined with a myriad of json data:\n-# GITHUB_WEBHOOK_PAYLOAD\n+# This script expects a few environment variables to be defined:\n+# PULL_REQUEST - JSON represntation of the pull request to be tested\n+# PR_TITLE - Title of the pull request, extracted from pull request json for predictable json parsing\n+# PR_BODY - BODY of the pull request, extracted from pull request json for predictable json parsing\n #\n-# The data expected from this payload is that generated by the pull reqeust edit webhook,\n-# defined here:\n+# The data expected in PULL_REQUEST is defined in the github api here:\n+# https://developer.github.com/v3/pulls/#get-a-single-pull-request\n+# The same data is provided in the webhook, which is defined here:\n # https://developer.github.com/v3/activity/events/types/#pullrequestevent\n #\n-# The script will parse the JSON and define a list of environment variables for consumption by\n-# the validation scripts. Then, each *.py file in ./validators/ (minus specified exclusions)\n+# The script will parse the provided pull request JSON and define a list of environment variables for\n+# consumption by the validation scripts. Then, each *.py file in ./validators/ (minus specified exclusions)\n # will be run. The list of variables defined is below:\n # Github stuff\n # PRV_TITLE Title of the pull request\n@@ -35,14 +38,13 @@\n \n # TODO:\n # - Handle failures better. Just exiting is not a good option, as it will likely leave the PR\n-# commit status in pending forever.\n+# commit status in pending forever. We might be able to better handle this in the webhook now\n \n import os\n import json\n import subprocess\n import sys\n import fnmatch\n-import requests\n \n import github_helpers\n \n@@ -87,14 +89,14 @@ def run_cli_cmd(cmd, exit_on_fail=True, log_cmd=True):\n return True, stdout\n \n def assign_env(pull_request):\n- '''Assign environment variables base don github webhook payload json data'''\n+ '''Assign environment variables based on pull_request json data and other env variables'''\n # Github environment variables\n- # encode to utf8 to support unicode characters in python 2.x\n- os.environ[\"PRV_TITLE\"] = pull_request[\"title\"].encode('utf8')\n- # Handle pull request body in case it is empty\n- # Also encode to utf8 to support unicde characters in python 2.x\n- body = (pull_request[\"body\"].encode('utf8') if pull_request[\"body\"] is not None else \"\")\n- os.environ[\"PRV_BODY\"] = body\n+ # Note that the PR title and body are special, as unexpected characters can cause issues\n+ # when parsing json. 
The jenkins pipeline should remove these from the pull request json\n+ # and set them in their own environment variables PR_TITLE and PR_BODY.\n+ # Additionally, encode to utf8 to support unicode characters in python 2.x\n+ os.environ[\"PRV_TITLE\"] = os.getenv(\"PR_TITLE\", \"\")#.encode('utf8')\n+ os.environ[\"PRV_BODY\"] = os.getenv(\"PR_BODY\", \"\")#.encode('utf8')\n os.environ[\"PRV_PULL_ID\"] = pull_request[\"number\"]\n os.environ[\"PRV_URL\"] = pull_request[\"url\"]\n \n@@ -178,37 +180,24 @@ def run_validators():\n return False\n return True\n \n-# Check both the user and org whitelist for the user in this payload\n-# Additionally, if the payload is an issue_comment, check to ensure that the\n-# TEST_STRING is included in the comment.\n+# Check both the user and org whitelist for the user in this pull request\n def pre_test_check(payload):\n \"\"\" Get and check the user whitelist for testing from mounted secret volume \"\"\"\n- # Get user from payload\n+ # Get user from pull request\n user = \"\"\n- comment_made = False\n- if \"pull_request\" in payload:\n- user = payload[\"pull_request\"][\"user\"][\"login\"]\n- elif \"comment\" in payload:\n- user = payload[\"comment\"][\"user\"][\"login\"]\n- comment_made = True\n+ if \"user\" in pull_request:\n+ user = pull_request[\"user\"][\"login\"]\n else:\n- print \"Webhook payload does not include pull request user or issue comment user data\"\n+ print \"Pull request data does not include pull request user or issue comment user data\"\n sys.exit(1)\n \n- if comment_made:\n- body = payload[\"comment\"][\"body\"]\n- if not \"[test]\" in body.split(\" \"):\n- print \"Pull request coment does not include test string \\\"\" + TEST_STRING +\"\\\"\"\n- # Exit success here so that the jenkins job is marked as a success,\n- # since no actual error occurred, the expected has happened\n- sys.exit(0)\n-\n # Get secret information from env variable\n secret_dir = os.getenv(\"WHITELIST_SECRET_DIR\")\n if secret_dir == \"\":\n print \"ERROR: $WHITELIST_SECRET_DIR undefined. This variable should exist and\" + \\\n \" should point to the mounted volume containing the admin whitelist\"\n sys.exit(2)\n+\n # Extract whitelist from secret volume\n user_whitelist_file = open(os.path.join(\"/\", secret_dir, \"users\"), \"r\")\n user_whitelist = user_whitelist_file.read()\n@@ -232,33 +221,6 @@ def check_org_whitelist(user, secret_dir):\n return True\n return False\n \n-# The payload may be for an issue_comment or for a pull_request. This method determines\n-# which of those this payload represents. 
If the payload is an issue_comment, the\n-# relevant pull_request information is gathered from Github\n-def get_pull_request_info(payload):\n- \"\"\" Get the relevant pull request details for this webhook payload \"\"\"\n- if \"pull_request\" in payload:\n- return payload[\"pull_request\"]\n-\n- if not \"issue\" in payload:\n- print \"Webhook payload does not include pull request or issue data\"\n- sys.exit(1)\n- if not \"pull_request\" in payload[\"issue\"]:\n- print \"Webhook payload is for an issue comment, not pull request.\"\n- sys.exit(1)\n-\n- pull_request_url = payload[\"issue\"][\"pull_request\"][\"url\"]\n- response = requests.get(pull_request_url)\n- response.raise_for_status()\n- pull_request_json = response.text\n- try:\n- pull_request = json.loads(pull_request_json, parse_int=str, parse_float=str)\n- except ValueError as error:\n- print \"Unable to load JSON data from \" + pull_request_url\n- print error\n- sys.exit(1)\n- return pull_request\n-\n def build_test_tools_rpms():\n \"\"\" Build and install the openshift-tools rpms \"\"\"\n # We only need to build the openshift-tools rpms:\n@@ -342,33 +304,27 @@ def run_unit_tests():\n return success, output\n \n def main():\n- \"\"\" Get the payload, merge changes, assign env, and run validators \"\"\"\n- # Get the github webhook payload json from the defined env variable\n- payload_json = os.getenv(\"GITHUB_WEBHOOK_PAYLOAD\", \"\")\n- if payload_json == \"\":\n- print 'No JSON data provided in $GITHUB_WEBHOOK_PAYLOAD'\n+ \"\"\" Get the pull request data, merge changes, assign env, and run validators \"\"\"\n+ # Get the pull request json from the defined env variable\n+ pull_request_json = os.getenv(\"PULL_REQUEST\", \"\")\n+ if pull_request_json == \"\":\n+ print 'No JSON data provided in $PULL_REQUEST environment variable'\n sys.exit(1)\n try:\n- payload = json.loads(payload_json, parse_int=str, parse_float=str)\n+ pull_request = json.loads(pull_request_json, parse_int=str, parse_float=str)\n except ValueError as error:\n- print \"Unable to load JSON data from $GITHUB_WEBHOOK_PAYLOAD:\"\n+ print \"Unable to load JSON data from $PULL_REQUEST environment variable:\"\n print error\n sys.exit(1)\n \n- # Run several checks to ensure tests should be run for this payload\n- pre_test_check(payload)\n-\n- # Extract or get the pull request information from the payload\n- pull_request = get_pull_request_info(payload)\n+ # Run several checks to ensure tests should be run for this pull request\n+ pre_test_check(pull_request)\n \n+ # These variables will be used at the end of testing to submit status updates\n remote_sha = pull_request[\"head\"][\"sha\"]\n pull_id = pull_request[\"number\"]\n repo = pull_request[\"base\"][\"repo\"][\"full_name\"]\n \n- # Update the PR to inform users that testing is in progress\n- github_helpers.submit_pr_status_update(\"pending\", \"Automated tests in progress\",\n- remote_sha, repo)\n-\n # Merge changes from pull request\n merge_changes(pull_request)\n \n"
}
] |
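The Groovy update_pr_status helper above posts to GitHub's commit status API. An equivalent hedged sketch in Python, using the requests package and placeholder values, would look like this.

import requests

def update_pr_status(state: str, text: str, sha: str, repo: str,
                     token: str, target_url: str) -> None:
    """Post a commit status (pending/success/failure), mirroring the Jenkinsfile helper."""
    url = f"https://api.github.com/repos/{repo}/statuses/{sha}"
    payload = {"state": state, "description": text,
               "target_url": target_url, "context": "jenkins-ci"}
    resp = requests.post(url, json=payload,
                         headers={"Authorization": f"token {token}"})
    resp.raise_for_status()

# Example (all values are placeholders):
# update_pr_status("pending", "Automated tests in progress", "abc123",
#                  "openshift/openshift-tools", token="...",
#                  target_url="https://jenkins.example.com/job/42/")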
b9cb055cfabd29cd65b44800c8e85a3fb157b846 | python-lz4/python-lz4 | 31.03.2020 14:13:13 | BSD 3-Clause New or Revised License | Check for available memory instead of its total amount in tests
This change prevents the python interpreter from triggering the
OOM-killer while trying to use/allocate more memory than available on
the system. | [
{
"change_type": "MODIFY",
"old_path": "tests/block/test_block_2.py",
"new_path": "tests/block/test_block_2.py",
"diff": "@@ -28,7 +28,7 @@ _4GB = 0x100000000 # 4GB\n reason='Py_ssize_t too small for this test'\n )\n @pytest.mark.skipif(\n- psutil.virtual_memory().total < _4GB,\n+ psutil.virtual_memory().available < _4GB,\n reason='Insufficient system memory for this test'\n )\n def test_huge():\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_1.py",
"new_path": "tests/stream/test_stream_1.py",
"diff": "@@ -150,7 +150,7 @@ def test_invalid_config_c_4(store_comp_size):\n if sys.maxsize < 0xffffffff:\n pytest.skip('Py_ssize_t too small for this test')\n \n- if psutil.virtual_memory().total < 3 * c_kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * c_kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n@@ -210,7 +210,7 @@ def test_invalid_config_c_5():\n if sys.maxsize < 0xffffffff:\n pytest.skip('Py_ssize_t too small for this test')\n \n- if psutil.virtual_memory().total < 3 * c_kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * c_kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n@@ -243,7 +243,7 @@ def test_invalid_config_d_5():\n if sys.maxsize < 0xffffffff:\n pytest.skip('Py_ssize_t too small for this test')\n \n- if psutil.virtual_memory().total < 3 * d_kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * d_kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n@@ -260,7 +260,7 @@ def test_invalid_config_d_5():\n if sys.maxsize < 0xffffffff:\n pytest.skip('Py_ssize_t too small for this test')\n \n- if psutil.virtual_memory().total < 3 * d_kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * d_kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_2.py",
"new_path": "tests/stream/test_stream_2.py",
"diff": "@@ -35,7 +35,7 @@ except (MemoryError, OverflowError):\n reason='Py_ssize_t too small for this test'\n )\n @pytest.mark.skipif(\n- psutil.virtual_memory().total < _4GB or huge is None,\n+ psutil.virtual_memory().available < _4GB or huge is None,\n reason='Insufficient system memory for this test'\n )\n def test_huge_1():\n@@ -47,7 +47,7 @@ def test_huge_1():\n 'dictionary': huge,\n }\n \n- if psutil.virtual_memory().total < 3 * kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n@@ -74,7 +74,7 @@ def test_huge_1():\n reason='Py_ssize_t too small for this test'\n )\n @pytest.mark.skipif(\n- psutil.virtual_memory().total < _4GB or huge is None,\n+ psutil.virtual_memory().available < _4GB or huge is None,\n reason='Insufficient system memory for this test'\n )\n def test_huge_2():\n@@ -86,7 +86,7 @@ def test_huge_2():\n 'dictionary': b'',\n }\n \n- if psutil.virtual_memory().total < 3 * kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n@@ -114,7 +114,7 @@ def test_huge_2():\n reason='Py_ssize_t too small for this test'\n )\n @pytest.mark.skipif(\n- psutil.virtual_memory().total < _4GB or huge is None,\n+ psutil.virtual_memory().available < _4GB or huge is None,\n reason='Insufficient system memory for this test'\n )\n def test_huge_3():\n@@ -126,7 +126,7 @@ def test_huge_3():\n 'dictionary': huge,\n }\n \n- if psutil.virtual_memory().total < 3 * kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_3.py",
"new_path": "tests/stream/test_stream_3.py",
"diff": "@@ -89,7 +89,7 @@ def test_block_decompress_mem_usage(data, buffer_size):\n if sys.maxsize < 0xffffffff:\n pytest.skip('Py_ssize_t too small for this test')\n \n- if psutil.virtual_memory().total < 3 * kwargs['buffer_size']:\n+ if psutil.virtual_memory().available < 3 * kwargs['buffer_size']:\n # The internal LZ4 context will request at least 3 times buffer_size\n # as memory (2 buffer_size for the double-buffer, and 1.x buffer_size\n # for the output buffer)\n"
}
] |
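The change above swaps psutil.virtual_memory().total for .available in the skip conditions. A self-contained example of the resulting pattern follows; the 4GB threshold mirrors the tests, while the allocation size is illustrative.

import psutil
import pytest

_4GB = 0x100000000

@pytest.mark.skipif(
    psutil.virtual_memory().available < _4GB,
    reason='Insufficient free memory for this test'
)
def test_needs_lots_of_memory():
    # Runs only when 4GB is actually free right now, so this allocation
    # should not push the system toward the OOM killer.
    buf = bytearray(1024 * 1024 * 1024)  # 1GB scratch buffer
    assert len(buf) == 1024 * 1024 * 1024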
84b110bc95d6a100081821c0daab9bbca4ad9794 | python-lz4/python-lz4 | 02.04.2020 15:30:40 | BSD 3-Clause New or Revised License | Reduce pressure on memory in stream tests
This change runs the python garbage collector before and after each
stream test.
The garbage collector is disabled in the CI since it has a significant
impact on the duration of the jobs (which are time constrained in the CI). | [
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_0.py",
"new_path": "tests/stream/test_stream_0.py",
"diff": "@@ -1,10 +1,29 @@\n import lz4.stream\n import sys\n import pytest\n+import gc\n+import os\n if sys.version_info <= (3, 2):\n import struct\n \n \n+def run_gc(func):\n+ if os.environ.get('TRAVIS') is not None or os.environ.get('APPVEYOR') is not None:\n+ def wrapper(*args, **kwargs):\n+ return func(*args, **kwargs)\n+ else:\n+ def wrapper(*args, **kwargs):\n+ gc.collect()\n+ try:\n+ result = func(*args, **kwargs)\n+ finally:\n+ gc.collect()\n+ return result\n+\n+ wrapper.__name__ = func.__name__\n+ return wrapper\n+\n+\n def get_stored_size(buff, block_length_size):\n if sys.version_info > (2, 7):\n if isinstance(buff, memoryview):\n@@ -26,6 +45,7 @@ def get_stored_size(buff, block_length_size):\n return struct.unpack('<' + fmt[block_length_size], b[:block_length_size])[0]\n \n \n+@run_gc\n def roundtrip(x, c_kwargs, d_kwargs, dictionary):\n if dictionary:\n if isinstance(dictionary, tuple):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_1.py",
"new_path": "tests/stream/test_stream_1.py",
"diff": "@@ -3,6 +3,8 @@ import pytest\n import sys\n import os\n import psutil\n+import gc\n+\n \n if sys.version_info < (3, ):\n from struct import pack, unpack\n@@ -42,6 +44,40 @@ _4GB = 0x100000000 # 4GB\n # fragile.\n \n \n+def run_gc(func):\n+ if os.environ.get('TRAVIS') is not None or os.environ.get('APPVEYOR') is not None:\n+ def wrapper(*args, **kwargs):\n+ return func(*args, **kwargs)\n+ else:\n+ def wrapper(*args, **kwargs):\n+ gc.collect()\n+ try:\n+ result = func(*args, **kwargs)\n+ finally:\n+ gc.collect()\n+ return result\n+\n+ wrapper.__name__ = func.__name__\n+ return wrapper\n+\n+\n+def run_gc_param_store_comp_size(func):\n+ if os.environ.get('TRAVIS') is not None:\n+ def wrapper(store_comp_size, *args, **kwargs):\n+ return func(store_comp_size, *args, **kwargs)\n+ else:\n+ def wrapper(store_comp_size, *args, **kwargs):\n+ gc.collect()\n+ try:\n+ result = func(store_comp_size, *args, **kwargs)\n+ finally:\n+ gc.collect()\n+ return result\n+\n+ wrapper.__name__ = func.__name__\n+ return wrapper\n+\n+\n def compress(x, c_kwargs, return_block_offset=False, check_block_type=False):\n o = [0, ]\n if c_kwargs.get('return_bytearray', False):\n@@ -80,6 +116,7 @@ def decompress(x, d_kwargs, check_chunk_type=False):\n return d\n \n \n+@run_gc\n def test_invalid_config_c_1():\n c_kwargs = {}\n c_kwargs['strategy'] = \"ring_buffer\"\n@@ -89,6 +126,7 @@ def test_invalid_config_c_1():\n lz4.stream.LZ4StreamCompressor(**c_kwargs)\n \n \n+@run_gc\n def test_invalid_config_d_1():\n d_kwargs = {}\n d_kwargs['strategy'] = \"ring_buffer\"\n@@ -98,6 +136,7 @@ def test_invalid_config_d_1():\n lz4.stream.LZ4StreamDecompressor(**d_kwargs)\n \n \n+@run_gc\n def test_invalid_config_c_2():\n c_kwargs = {}\n c_kwargs['strategy'] = \"foo\"\n@@ -107,6 +146,7 @@ def test_invalid_config_c_2():\n lz4.stream.LZ4StreamCompressor(**c_kwargs)\n \n \n+@run_gc\n def test_invalid_config_d_2():\n d_kwargs = {}\n d_kwargs['strategy'] = \"foo\"\n@@ -116,6 +156,7 @@ def test_invalid_config_d_2():\n lz4.stream.LZ4StreamDecompressor(**d_kwargs)\n \n \n+@run_gc_param_store_comp_size\n def test_invalid_config_c_3(store_comp_size):\n c_kwargs = {}\n c_kwargs['strategy'] = \"double_buffer\"\n@@ -126,6 +167,7 @@ def test_invalid_config_c_3(store_comp_size):\n lz4.stream.LZ4StreamCompressor(**c_kwargs)\n \n \n+@run_gc_param_store_comp_size\n def test_invalid_config_d_3(store_comp_size):\n d_kwargs = {}\n d_kwargs['strategy'] = \"double_buffer\"\n@@ -136,6 +178,7 @@ def test_invalid_config_d_3(store_comp_size):\n lz4.stream.LZ4StreamDecompressor(**d_kwargs)\n \n \n+@run_gc_param_store_comp_size\n def test_invalid_config_c_4(store_comp_size):\n c_kwargs = {}\n c_kwargs['strategy'] = \"double_buffer\"\n@@ -161,6 +204,7 @@ def test_invalid_config_c_4(store_comp_size):\n lz4.stream.LZ4StreamCompressor(**c_kwargs)\n \n \n+@run_gc_param_store_comp_size\n def test_invalid_config_d_4(store_comp_size):\n d_kwargs = {}\n d_kwargs['strategy'] = \"double_buffer\"\n@@ -189,6 +233,7 @@ def test_invalid_config_d_4(store_comp_size):\n lz4.stream.LZ4StreamDecompressor(**d_kwargs)\n \n \n+@run_gc\n def test_invalid_config_c_5():\n c_kwargs = {}\n c_kwargs['strategy'] = \"double_buffer\"\n@@ -220,6 +265,7 @@ def test_invalid_config_c_5():\n lz4.stream.LZ4StreamCompressor(**c_kwargs)\n \n \n+@run_gc\n def test_invalid_config_d_5():\n d_kwargs = {}\n d_kwargs['strategy'] = \"double_buffer\"\n@@ -276,6 +322,7 @@ def test_invalid_config_d_5():\n lz4.stream.LZ4StreamDecompressor(**d_kwargs)\n \n \n+@run_gc\n def 
test_decompress_corrupted_input_1():\n c_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -291,6 +338,7 @@ def test_decompress_corrupted_input_1():\n decompress(data[4:], d_kwargs)\n \n \n+@run_gc\n def test_decompress_corrupted_input_2():\n c_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -315,6 +363,7 @@ def test_decompress_corrupted_input_2():\n decompress(data, d_kwargs)\n \n \n+@run_gc\n def test_decompress_corrupted_input_3():\n c_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -344,6 +393,7 @@ def test_decompress_corrupted_input_3():\n decompress(data, d_kwargs)\n \n \n+@run_gc\n def test_decompress_corrupted_input_4():\n c_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -375,6 +425,7 @@ def test_decompress_corrupted_input_4():\n decompress(data, d_kwargs)\n \n \n+@run_gc\n def test_decompress_truncated():\n c_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -407,6 +458,7 @@ def test_decompress_truncated():\n # we will keep them for now\n \n \n+@run_gc\n def test_decompress_with_trailer():\n c_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -433,6 +485,7 @@ def test_decompress_with_trailer():\n decompress(comp + b'\\x00' * n, d_kwargs)\n \n \n+@run_gc\n def test_unicode():\n if sys.version_info < (3,):\n return # skip\n@@ -452,6 +505,7 @@ def test_unicode():\n # for now\n \n \n+@run_gc\n def test_return_bytearray():\n if sys.version_info < (3,):\n return # skip\n@@ -473,6 +527,7 @@ def test_return_bytearray():\n assert bytes(b) == data\n \n \n+@run_gc\n def test_memoryview():\n if sys.version_info < (2, 7):\n return # skip\n@@ -488,6 +543,7 @@ def test_memoryview():\n assert decompress(memoryview(compressed), d_kwargs) == data\n \n \n+@run_gc\n def test_with_dict_none():\n kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -530,6 +586,7 @@ def test_with_dict_none():\n assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data\n \n \n+@run_gc\n def test_with_dict():\n kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -568,6 +625,7 @@ def test_with_dict():\n assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data\n \n \n+@run_gc\n def test_known_decompress_1():\n d_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -582,6 +640,7 @@ def test_known_decompress_1():\n assert decompress(input, d_kwargs) == output\n \n \n+@run_gc\n def test_known_decompress_2():\n d_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -590,6 +649,7 @@ def test_known_decompress_2():\n assert decompress(input, d_kwargs) == output\n \n \n+@run_gc\n def test_known_decompress_3():\n d_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n@@ -599,6 +659,7 @@ def test_known_decompress_3():\n assert decompress(input, d_kwargs) == output\n \n \n+@run_gc\n def test_known_decompress_4():\n d_kwargs = {'strategy': \"double_buffer\", 'buffer_size': 128, 'store_comp_size': 4}\n \n"
},
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_2.py",
"new_path": "tests/stream/test_stream_2.py",
"diff": "@@ -3,6 +3,24 @@ import sys\n import lz4.stream\n import psutil\n import os\n+import gc\n+\n+\n+def run_gc(func):\n+ if os.environ.get('TRAVIS') is not None or os.environ.get('APPVEYOR') is not None:\n+ def wrapper(*args, **kwargs):\n+ return func(*args, **kwargs)\n+ else:\n+ def wrapper(*args, **kwargs):\n+ gc.collect()\n+ try:\n+ result = func(*args, **kwargs)\n+ finally:\n+ gc.collect()\n+ return result\n+\n+ wrapper.__name__ = func.__name__\n+ return wrapper\n \n \n # This test requires allocating a big lump of memory. In order to\n@@ -42,6 +60,7 @@ else:\n psutil.virtual_memory().available < _4GB or huge is None,\n reason='Insufficient system memory for this test'\n )\n+@run_gc\n def test_huge_1():\n data = b''\n kwargs = {\n@@ -81,6 +100,7 @@ def test_huge_1():\n psutil.virtual_memory().available < _4GB or huge is None,\n reason='Insufficient system memory for this test'\n )\n+@run_gc\n def test_huge_2():\n data = huge\n kwargs = {\n@@ -121,6 +141,7 @@ def test_huge_2():\n psutil.virtual_memory().available < _4GB or huge is None,\n reason='Insufficient system memory for this test'\n )\n+@run_gc\n def test_huge_3():\n data = huge\n kwargs = {\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/stream/test_stream_3.py",
"new_path": "tests/stream/test_stream_3.py",
"diff": "@@ -3,6 +3,7 @@ import pytest\n import sys\n import os\n import psutil\n+import gc\n \n \n _1KB = 1024\n@@ -10,6 +11,23 @@ _1MB = _1KB * 1024\n _1GB = _1MB * 1024\n \n \n+def run_gc_param_data_buffer_size(func):\n+ if os.environ.get('TRAVIS') is not None or os.environ.get('APPVEYOR') is not None:\n+ def wrapper(data, buffer_size, *args, **kwargs):\n+ return func(data, buffer_size, *args, **kwargs)\n+ else:\n+ def wrapper(data, buffer_size, *args, **kwargs):\n+ gc.collect()\n+ try:\n+ result = func(data, buffer_size, *args, **kwargs)\n+ finally:\n+ gc.collect()\n+ return result\n+\n+ wrapper.__name__ = func.__name__\n+ return wrapper\n+\n+\n def compress(x, c_kwargs):\n if c_kwargs.get('return_bytearray', False):\n c = bytearray()\n@@ -73,6 +91,7 @@ def data(request):\n return request.param\n \n \n+@run_gc_param_data_buffer_size\n def test_block_decompress_mem_usage(data, buffer_size):\n kwargs = {\n 'strategy': \"double_buffer\",\n"
}
] |
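A condensed sketch of the run_gc decorator pattern introduced above: collect garbage before and after each wrapped test, except on CI (Travis/AppVeyor) where the extra collections would slow down time-boxed jobs. functools.wraps is used here instead of setting __name__ by hand, which the original does.

import functools
import gc
import os

def run_gc(func):
    """Run gc.collect() around the wrapped test unless running on Travis/AppVeyor."""
    if os.environ.get('TRAVIS') is not None or os.environ.get('APPVEYOR') is not None:
        return func
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        gc.collect()
        try:
            return func(*args, **kwargs)
        finally:
            gc.collect()
    return wrapper

@run_gc
def test_example():
    assert sum(range(10)) == 45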
eede0e1505faec4218a95b5b27702d68f3aa4f88 | siliconcompiler/siliconcompiler | 09.06.2021 15:57:10 | Apache License 2.0 | Fix multiline strings in schema examples
Using '\' or multiline strings results in extra whitespace, so it seems
best to use Python's automatic concatenation of adjacent strings. | [
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/schema.py",
"new_path": "siliconcompiler/schema.py",
"diff": "@@ -638,8 +638,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Primitive Libraries',\n 'param_help' : \"pdk plib stackvar toolvar formatvar <file>\",\n 'example': [\"\"\"cli: -pdk_plib 'M10 klayout oa /disk/asap7/oa/devlib'\"\"\",\n- \"\"\"api: chip.add('pdk','plib','M10', 'klayout', 'oa', \n- '/disk/asap7/oa/devlib')\"\"\"],\n+ \"api: chip.add('pdk','plib','M10', 'klayout', 'oa',\"\n+ \"'/disk/asap7/oa/devlib')\"],\n 'help' : \"\"\"\n Filepaths to all primitive cell libraries supported by the PDK. The \n filepaths are entered on a per stackup and per format basis.\n@@ -663,8 +663,8 @@ def schema_pdk(cfg):\n 'short_help' : 'APR Technology File',\n 'param_help' : \"pdk aprtech stackvar libtypevar filetypevar <file>\",\n 'example': [\"\"\"cli: -pdk_aprtech 'M10 12t lef tech.lef'\"\"\",\n- \"\"\"api: chip.add('pdk','aprtech','M10','12t','lef',\n- 'tech.lef')\"\"\"],\n+ \"api: chip.add('pdk','aprtech','M10','12t','lef',\"\n+ \"'tech.lef')\"],\n 'help' : \"\"\"\n Technology file containing the design rule and setup information needed\n to enable DRC clean automated placement a routing. The file is \n@@ -689,8 +689,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Name Map',\n 'param_help' : \"pdk grid stackvar layervar name <str>\",\n 'example': [\"\"\"cli: -pdk_grid_name 'M10 m1 metal1'\"\"\",\n- \"\"\"api: chip.add('pdk', 'grid', 'M10', 'm1', 'name',\n- 'metal1')\"\"\"],\n+ \"api: chip.add('pdk', 'grid', 'M10', 'm1', 'name',\"\n+ \"'metal1')\"],\n 'help' : \"\"\"\n Defines the hardcoded PDK metal name on a per stackup and per metal \n basis. Metal layers are ordered from m1 to mn, where m1 is the lowest\n@@ -707,8 +707,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Horizontal Grid',\n 'param_help' : \"pdk grid stackvar layervar xpitch <num>\",\n 'example': [\"\"\"cli: -pdk_grid_xpitch 'M10 m1 0.5'\"\"\",\n- \"\"\"api: chip.add('pdk','grid','M10','m1','xpitch',\n- '0.5')\"\"\"],\n+ \"api: chip.add('pdk','grid','M10','m1','xpitch',\"\n+ \"'0.5')\"],\n 'help' : \"\"\"\n Defines the vertical routing grid on a a per stackup and per metal \n basis. Values are specified in um. Metal layers are ordered from m1 to \n@@ -726,8 +726,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Vertical Grid',\n 'param_help' : \"pdk grid stackvar layervar ypitch <num>'\",\n 'example': [\"\"\"cli: -pdk_grid_ypitch 'M10 m2 0.5'\"\"\",\n- \"\"\"api: chip.add('pdk','grid','M10','m2','ypitch',\n- '0.5')\"\"\"],\n+ \"api: chip.add('pdk','grid','M10','m2','ypitch',\"\n+ \"'0.5')\"],\n 'help' : \"\"\"\n Defines the horizontal routing grid on a a per stackup and per metal \n basis. Values are specified in um. Metal layers are ordered from m1 to\n@@ -745,8 +745,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Preferred Direction',\n 'param_help' : \"pdk grid stackvar layervar xoffset <num>\",\n 'example': [\"\"\"cli: -pdk_grid_xoffset 'M10 m2 0.5'\"\"\",\n- \"\"\"api: chip.add('pdk','grid','M10','m2','xoffset',\n- '0.5')\"\"\"],\n+ \"api: chip.add('pdk','grid','M10','m2','xoffset',\"\n+ \"'0.5')\"],\n 'help' : \"\"\"\n Defines the horizontal grid offset of a metal layer specified on a per \n stackup and per metal basis. 
Values are specified in um.\n@@ -763,8 +763,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Preferred Direction',\n 'param_help' : \"pdk grid stackvar layervar yoffset <num>\",\n 'example': [\"\"\"cli: -pdk_grid_yoffset 'M10 m2 0.5'\"\"\",\n- \"\"\"api: chip.add('pdk','grid','M10','m2','yoffset',\n- '0.5')\"\"\"],\n+ \"api: chip.add('pdk','grid','M10','m2','yoffset',\"\n+ \"'0.5')\"],\n 'help' : \"\"\"\n Defines the horizontal grid offset of a metal layer specified on a per \n stackup and per metal basis. Values are specified in um.\n@@ -781,8 +781,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Routing Adjustment',\n 'param_help' : \"pdk grid stackvar layervar adj <num>\",\n 'example': [\"\"\"cli: -pdk_grid_adj 'M10 m2 0.5'\"\"\",\n- \"\"\"api: chip.set('pdk','grid','M10','m2','adj',\n- '0.5')\"\"\"],\n+ \"api: chip.set('pdk','grid','M10','m2','adj',\"\n+ \"'0.5')\"],\n 'help' : \"\"\"\n Defines the routing resources adjustments for the design on a per layer\n basis. The value is expressed as a fraction from 0 to 1. A value of\n@@ -800,8 +800,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Routing Layer Capacitance',\n 'param_help' : \"pdk grid stackvar layervar cap <num>\",\n 'example': [\"\"\"cli: -pdk_grid_cap 'M10 m2 0.2'\"\"\",\n- \"\"\"api: chip.set('pdk','grid','M10','m2','cap',\n- '0.2')\"\"\"],\n+ \"api: chip.set('pdk','grid','M10','m2','cap',\"\n+ \"0.2')\"],\n 'help' : \"\"\"\n Specifies the unit capacitance of a wire defined by the grid\n width and spacing values in the 'grid' structure. The\n@@ -821,8 +821,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Routing Layer Resistance',\n 'param_help' : \"pdk grid stackvar layervar res <num>\",\n 'example': [\"\"\"cli: -pdk_grid_res 'M10 m2 0.2'\"\"\",\n- \"\"\"api: chip.set('pdk','grid','M10','m2','res',\n- '0.2')\"\"\"],\n+ \"api: chip.set('pdk','grid','M10','m2','res',\"\n+ \"'0.2')\"],\n 'help' : \"\"\"\n Specifies the resistance of a wire defined by the grid\n width and spacing values in the 'grid' structure. 
The\n@@ -842,8 +842,8 @@ def schema_pdk(cfg):\n 'short_help' : 'Grid Layer Temperature Coefficent',\n 'param_help' : \"pdk grid stackvar layervar tcr <num>\",\n 'example': [\"\"\"cli: -pdk_grid_tcr 'M10 m2 0.1'\"\"\",\n- \"\"\"api: chip.set('pdk','grid','M10','m2','tcr',\n- '0.1')\"\"\"],\n+ \"api: chip.set('pdk','grid','M10','m2','tcr',\"\n+ \"'0.1')\"],\n 'help' : \"\"\"\n Specifies the temperature coefficient of resistance of the wire \n defined by the grid width and spacing values in the 'grid' \n@@ -1066,8 +1066,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' Operating Condition',\n 'param_help' : group+\" libvar model cornervar opcond <str>\",\n 'example':[\"cli: -\"+group+\"_opcond 'lib model ss_1.0v_125c WORST'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss_1.0v_125c', \\\n- 'opcond', 'WORST'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss_1.0v_125c',\"\n+ \"'opcond', 'WORST'\"],\n 'help' : \"\"\"\n The default operating condition to use for mcmm optimization and\n signoff on a per corner basis.\n@@ -1084,8 +1084,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' Corner Checks',\n 'param_help' : group+\" libvar model cornervar check <str>\",\n 'example':[\"cli: -\"+group+\"_check 'lib model ss_1.0v_125c setup'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss_1.0v_125c', \\\n- 'check', 'setup'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss_1.0v_125c',\"\n+ \"'check', 'setup'\"],\n 'help' : \"\"\"\n Per corner checks to perform during optimization and STA signoff.\n Names used in the 'mcmm' scenarios must align with the 'check' names\n@@ -1113,8 +1113,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' NLDM Timing Model',\n 'param_help' : group+\" libvar model cornervar nldm typevar <file>\",\n 'example':[\"cli: -\"+group+\"_nldm 'lib model ss gz ss.lib.gz'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss','nldm', \\\n- 'gz', 'ss.lib.gz'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss','nldm',\"\n+ \"'gz', 'ss.lib.gz'\"],\n 'help' : \"\"\"\n Filepaths to NLDM models. Timing files are specified on a per lib,\n per corner, and per format basis. The format is driven by EDA tool\n@@ -1139,8 +1139,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' CCS Timing Model',\n 'param_help' : group+\" libvar model cornervar ccs typevar <file>\",\n 'example':[\"cli: -\"+group+\"_ccs 'lib model ss lib.gz ss.lib.gz'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss','ccs', \\\n- 'gz', 'ss.lib.gz'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss','ccs',\"\n+ \"'gz', 'ss.lib.gz'\"],\n 'help' : \"\"\"\n Filepaths to CCS models. Timing files are specified on a per lib,\n per corner, and per format basis. The format is driven by EDA tool\n@@ -1165,8 +1165,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' SCM Timing Model',\n 'param_help' : group+\" libvar model cornervar scm typevar <file>\",\n 'example':[\"cli: -\"+group+\"_scm 'lib model ss lib.gz ss.lib.gz'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss', 'scm', \\\n- 'gz', 'ss.lib.gz'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss', 'scm',\"\n+ \"'gz', 'ss.lib.gz'\"],\n 'help' : \"\"\"\n Filepaths to SCM models. Timing files are specified on a per lib,\n per corner, and per format basis. 
The format is driven by EDA tool\n@@ -1190,8 +1190,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' AOCV Timing Model',\n 'param_help' : group+\" libvar model cornervar aocv <file>\",\n 'example':[\"cli: -\"+group+\"_aocv 'lib model ss lib.aocv'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss', 'aocv', \\\n- 'lib_ss.aocv'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss', 'aocv',\"\n+ \"'lib_ss.aocv'\"],\n 'help': \"\"\"\n Filepaths to AOCV models. Timing files are specified on a per lib,\n per corner basis. \n@@ -1214,8 +1214,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' APL Power Model',\n 'param_help' : group+\" libvar model cornervar apl typevar <file>\",\n 'example':[\"cli: -\"+group+\"_apl 'lib model ss cdev lib_tt.cdev'\",\n- \"api: chip.add('\"+group+\"','lib','model','ss','apl','cdev',\\\n- 'lib_tt.cdev'\"],\n+ \"api: chip.add('\"+group+\"','lib','model','ss','apl','cdev',\"\n+ \"'lib_tt.cdev'\"],\n 'help' : \"\"\"\n Filepaths to APL power models. Power files are specified on a per\n lib, per corner, and per format basis.\n@@ -1308,8 +1308,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' Spice Netlist',\n 'param_help' : group+\" libvar spice format <file>\",\n 'example':[\"cli: -\"+group+\"_spice 'mylib pspice mylib.sp'\",\n- \"api: chip.add('\"+group+\"','mylib','spice', 'pspice',\\\n- 'mylib.sp')\"],\n+ \"api: chip.add('\"+group+\"','mylib','spice', 'pspice',\"\n+ \"'mylib.sp')\"],\n 'help' : \"\"\"\n Files containing library spice netlists used for circuit \n simulation, specified on a per format basis. \n@@ -1330,8 +1330,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' HDL Model',\n 'param_help' : group+\" libvar hdl formatvar <file>\",\n 'example':[\"cli: -\"+group+\"_hdl 'mylib verilog mylib.v'\",\n- \"api: chip.add('\"+group+\"','mylib','hdl', 'verilog',\\\n- 'mylib.v')\"],\n+ \"api: chip.add('\"+group+\"','mylib','hdl', 'verilog',\"\n+ \"'mylib.v')\"],\n 'help' : \"\"\"\n Library HDL models, specifed on a per format basis. Examples\n of legal formats include Verilog, VHDL.\n@@ -1439,8 +1439,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' Cell Lists',\n 'param_help' : group+\" libvar cells groupvar <str>\",\n 'example':[\"cli: -\"+group+\"_cells 'mylib dontuse *eco*'\",\n- \"api: chip.add('\"+group+\"','mylib','cells','dontuse', \\\n- '*eco*')\"],\n+ \"api: chip.add('\"+group+\"','mylib','cells','dontuse',\"\n+ \"'*eco*')\"],\n 'help' : \"\"\"\n A named list of cells grouped by a property that can be accessed\n directly by the designer and EDA tools. The example below shows how\n@@ -1465,8 +1465,8 @@ def schema_libs(cfg, group):\n 'short_help' : group.capitalize() + ' Layout Database',\n 'param_help' : group+\" libvar layoutdb stackvar formatvar <file>\",\n 'example':[\"cli: -\"+group+\"_layoutdb 'mylib M10 oa /disk/mylibdb'\",\n- \"api: chip.add('\"+group+\"','mylib','layoutdb','M10', \\\n- 'oa', '/disk/mylibdb')\"],\n+ \"api: chip.add('\"+group+\"','mylib','layoutdb','M10',\"\n+ \"'oa', '/disk/mylibdb')\"],\n 'help' : \"\"\"\n Filepaths to compiled library layout database specified on a per format\n basis. Example formats include oa, mw, ndm.\n"
}
] |
263bb8018dd186fa5b4fc580fca869be35c4dcf5 | siliconcompiler/siliconcompiler | 15.02.2022 16:50:13 | Apache License 2.0 | Remove PDK-specific var defaults from openroad.py
We shouldn't be hardcoding tech-specific values in tool setup scripts,
so we should use the new ['pdk', 'variable'] parameter to drive these.
In order to support user overrides, we'll still drive them through the
['eda', ... 'variable'] keypath in the TCL script. | [
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/pdks/asap7.py",
"new_path": "siliconcompiler/pdks/asap7.py",
"diff": "@@ -135,6 +135,13 @@ def setup(chip):\n chip.set('pdk','grid', stackup, layer, 'ypitch', 0.08)\n chip.set('pdk','grid', stackup, layer, 'adj', 0.4)\n \n+ # Defaults for OpenROAD tool variables\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'place_density', ['0.77'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'pad_global_place', ['2'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'pad_detail_place', ['1'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'macro_place_halo', ['22.4', '15.12'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'macro_place_channel', ['18.8', '19.95'])\n+\n #########################\n if __name__ == \"__main__\":\n \n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/pdks/freepdk45.py",
"new_path": "siliconcompiler/pdks/freepdk45.py",
"diff": "@@ -154,6 +154,13 @@ def setup(chip):\n else:\n chip.set('pdk','grid', stackup, layer, 'dir', 'horizontal')\n \n+ # Defaults for OpenROAD tool variables\n+ chip.set('pdk', 'variable', 'openroad', stackup, 'place_density', ['0.3'])\n+ chip.set('pdk', 'variable', 'openroad', stackup, 'pad_global_place', ['2'])\n+ chip.set('pdk', 'variable', 'openroad', stackup, 'pad_detail_place', ['1'])\n+ chip.set('pdk', 'variable', 'openroad', stackup, 'macro_place_halo', ['22.4', '15.12'])\n+ chip.set('pdk', 'variable', 'openroad', stackup, 'macro_place_channel', ['18.8', '19.95'])\n+\n #########################\n if __name__ == \"__main__\":\n \n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/pdks/skywater130.py",
"new_path": "siliconcompiler/pdks/skywater130.py",
"diff": "@@ -143,6 +143,13 @@ def setup(chip):\n chip.set('pdk','grid', stackup, 'met5', 'ypitch', 3.4)\n chip.set('pdk','grid', stackup, 'met5', 'adj', 0.5)\n \n+ # Defaults for OpenROAD tool variables\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'place_density', ['0.6'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'pad_global_place', ['4'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'pad_detail_place', ['2'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'macro_place_halo', ['1', '1'])\n+ chip.set('pdk', 'variable', stackup, 'openroad', 'macro_place_channel', ['80', '80'])\n+\n #########################\n if __name__ == \"__main__\":\n \n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/tools/openroad/openroad.py",
"new_path": "siliconcompiler/tools/openroad/openroad.py",
"diff": "@@ -102,66 +102,26 @@ def setup(chip, mode='batch'):\n chip.error = 1\n chip.logger.error(f'Stackup and logiclib parameters required for OpenROAD.')\n \n-\n- # defining default dictionary\n- default_options = {\n- 'place_density': [],\n- 'pad_global_place': [],\n- 'pad_detail_place': [],\n- 'macro_place_halo': [],\n- 'macro_place_channel': []\n- }\n-\n- # Setting up technologies with default values\n- # NOTE: no reasonable defaults, for halo and channel.\n- # TODO: Could possibly scale with node number for default, but safer to error out?\n- # perhaps we should use node as comp instead?\n- if chip.get('pdk', 'process'):\n- process = chip.get('pdk', 'process')\n- if process == 'freepdk45':\n- default_options = {\n- 'place_density': ['0.3'],\n- 'pad_global_place': ['2'],\n- 'pad_detail_place': ['1'],\n- 'macro_place_halo': ['22.4', '15.12'],\n- 'macro_place_channel': ['18.8', '19.95']\n- }\n- elif process == 'asap7':\n- default_options = {\n- 'place_density': ['0.77'],\n- 'pad_global_place': ['2'],\n- 'pad_detail_place': ['1'],\n- 'macro_place_halo': ['22.4', '15.12'],\n- 'macro_place_channel': ['18.8', '19.95']\n- }\n- elif process == 'skywater130':\n- default_options = {\n- 'place_density': ['0.6'],\n- 'pad_global_place': ['4'],\n- 'pad_detail_place': ['2'],\n- 'macro_place_halo': ['1', '1'],\n- 'macro_place_channel': ['80', '80']\n- }\n- else:\n- chip.error = 1\n- chip.logger.error(f'Process {process} not supported with OpenROAD.')\n- else:\n- default_options = {\n- 'place_density': ['1'],\n- 'pad_global_place': ['<space>'],\n- 'pad_detail_place': ['<space>'],\n- 'macro_place_halo': ['<xspace>', '<yspace>'],\n- 'macro_place_channel': ['<xspace>', '<yspace>']\n- }\n-\n- for option in default_options:\n- if chip.valid('eda', tool, 'variable', step, index, option, quiet=True, default_valid=False):\n- chip.logger.info('User provided variable %s OpenROAD flow detected.', option)\n- elif not default_options[option]:\n- chip.error = 1\n- chip.logger.error('Missing variable %s for OpenROAD.', option)\n- else:\n- chip.set('eda', tool, 'variable', step, index, option, default_options[option], clobber=clobber)\n+ variables = (\n+ 'place_density',\n+ 'pad_global_place',\n+ 'pad_detail_place',\n+ 'macro_place_halo',\n+ 'macro_place_channel'\n+ )\n+ for variable in variables:\n+ # For each OpenROAD tool variable, read default from PDK and write it\n+ # into schema. If PDK doesn't contain a default, the value must be set\n+ # by the user, so we add the variable keypath as a requirement.\n+ if chip.valid('pdk', 'variable', tool, stackup, variable):\n+ value = chip.get('pdk', 'variable', tool, stackup, variable)\n+ # Clobber needs to be False here, since a user might want to\n+ # overwrite these.\n+ chip.set('eda', tool, 'variable', step, index, variable, value,\n+ clobber=False)\n+\n+ keypath = ','.join(['eda', tool, 'variable', step, index, variable])\n+ chip.add('eda', tool, 'require', step, index, keypath)\n \n for clock in chip.getkeys('clock'):\n chip.add('eda', tool, 'require', step, index, ','.join(['clock', clock, 'period']))\n"
}
] |
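A minimal sketch of the pattern the commit above moves to, condensed from its diffs. The design name 'top', stackup '10M', and the place/0 task are placeholder assumptions; the keypath order follows the read side in openroad.py.

    import siliconcompiler

    chip = siliconcompiler.Chip('top')
    tool, step, index, stackup = 'openroad', 'place', '0', '10M'  # assumed values

    # PDK setup publishes the tech-specific default...
    chip.set('pdk', 'variable', tool, stackup, 'place_density', ['0.6'])

    # ...and the tool setup copies it into the per-task EDA variable.
    # clobber=False keeps any value the user has already set, and the keypath
    # is added to the tool requirements so a missing value is still flagged.
    if chip.valid('pdk', 'variable', tool, stackup, 'place_density'):
        value = chip.get('pdk', 'variable', tool, stackup, 'place_density')
        chip.set('eda', tool, 'variable', step, index, 'place_density', value,
                 clobber=False)
    chip.add('eda', tool, 'require', step, index,
             ','.join(['eda', tool, 'variable', step, index, 'place_density']))

The TCL scripts keep reading from the ['eda', ..., 'variable'] keypath, which is what preserves user overrides.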
2aa226f7dba49d10d7175e151903f65282212ba0 | siliconcompiler/siliconcompiler | 15.03.2022 11:43:40 | Apache License 2.0 | Fix RST parsing in schemagen
- Use the standard Python docstring "trim" function (per PEP 257) to preserve
newlines and indentation in schelp strings.
- Fix schemagen parse_rst() to handle multiline text | [
{
"change_type": "MODIFY",
"old_path": "docs/_ext/dynamicgen.py",
"new_path": "docs/_ext/dynamicgen.py",
"diff": "@@ -19,7 +19,7 @@ import subprocess\n \n from common import *\n \n-import siliconcompiler\n+from siliconcompiler import utils\n \n #############\n # Helpers\n@@ -98,37 +98,6 @@ def build_config_recursive(schema, keypath_prefix=[], sec_key_prefix=[]):\n # schema without leaves.\n return child_sections\n \n-def trim(docstring):\n- '''Helper function for cleaning up indentation of docstring.\n-\n- This is important for properly parsing complex RST in our docs.\n-\n- Source:\n- https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation'''\n- if not docstring:\n- return ''\n- # Convert tabs to spaces (following the normal Python rules)\n- # and split into a list of lines:\n- lines = docstring.expandtabs().splitlines()\n- # Determine minimum indentation (first line doesn't count):\n- indent = sys.maxsize\n- for line in lines[1:]:\n- stripped = line.lstrip()\n- if stripped:\n- indent = min(indent, len(line) - len(stripped))\n- # Remove indentation (first line is special):\n- trimmed = [lines[0].strip()]\n- if indent < sys.maxsize:\n- for line in lines[1:]:\n- trimmed.append(line[indent:].rstrip())\n- # Strip off trailing and leading blank lines:\n- while trimmed and not trimmed[-1]:\n- trimmed.pop()\n- while trimmed and not trimmed[0]:\n- trimmed.pop(0)\n- # Return a single string:\n- return '\\n'.join(trimmed)\n-\n #############\n # Base class\n #############\n@@ -159,7 +128,7 @@ class DynamicGen(SphinxDirective):\n # raw docstrings have funky indentation (basically, each line is already\n # indented as much as the function), so we call trim() helper function\n # to clean it up\n- docstr = trim(make_docs.__doc__)\n+ docstr = utils.trim(make_docs.__doc__)\n \n if docstr:\n self.parse_rst(docstr, s)\n"
},
{
"change_type": "MODIFY",
"old_path": "docs/_ext/schemagen.py",
"new_path": "docs/_ext/schemagen.py",
"diff": "@@ -67,8 +67,9 @@ class SchemaGen(SphinxDirective):\n \n def parse_rst(self, content):\n rst = ViewList()\n- # use fake filename 'inline' and fake line number '1' for error reporting\n- rst.append(content, 'inline', 1)\n+ # use fake filename 'inline' for error # reporting\n+ for i, line in enumerate(content.split('\\n')):\n+ rst.append(line, 'inline', i)\n body = nodes.paragraph()\n nested_parse_with_titles(self.state, rst, body)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/schema.py",
"new_path": "siliconcompiler/schema.py",
"diff": "@@ -1,5 +1,7 @@\n # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.\n \n+from siliconcompiler import utils\n+\n import re\n import os\n import sys\n@@ -49,9 +51,8 @@ def scparam(cfg,\n schelp=schelp)\n else:\n \n- # removing leading newline and space\n- schelp = re.sub(r'\\n\\s*', \" \", schelp)\n- schelp = schelp.strip()\n+ # removing leading spaces as if schelp were a docstring\n+ schelp = utils.trim(schelp)\n \n # setting valus based on types\n # note (bools are never lists)\n@@ -1880,6 +1881,7 @@ def schema_record(cfg, step='default', index='default'):\n 'region' : ['cloud region',\n 'US Gov Boston',\n \"\"\"Recommended naming methodology:\n+\n * local: node is the local machine\n * onprem: node in on-premises IT infrastructure\n * public: generic public cloud\n@@ -1903,6 +1905,7 @@ def schema_record(cfg, step='default', index='default'):\n }\n \n for item,val in records.items():\n+ helpext = utils.trim(val[2])\n scparam(cfg, ['record', step, index, item],\n sctype='str',\n scope='job',\n@@ -1911,10 +1914,7 @@ def schema_record(cfg, step='default', index='default'):\n example=[\n f\"cli: -record_{item} 'dfm 0 <{val[1]}>'\",\n f\"api: chip.set('record','dfm','0','{item}', <{val[1]}>)\"],\n- schelp=f\"\"\"\n- Record tracking the {val[0]} per step and index basis.\n- {val[2]}\n- \"\"\")\n+ schelp=f'Record tracking the {val[0]} per step and index basis. {helpext}')\n \n return cfg\n \n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/utils.py",
"new_path": "siliconcompiler/utils.py",
"diff": "@@ -1,5 +1,6 @@\n import os\n import shutil\n+import sys\n \n def copytree(src, dst, ignore=[], dirs_exist_ok=False, link=False):\n '''Simple implementation of shutil.copytree to give us a dirs_exist_ok\n@@ -23,3 +24,34 @@ def copytree(src, dst, ignore=[], dirs_exist_ok=False, link=False):\n os.link(srcfile, dstfile)\n else:\n shutil.copy2(srcfile, dstfile)\n+\n+def trim(docstring):\n+ '''Helper function for cleaning up indentation of docstring.\n+\n+ This is important for properly parsing complex RST in our docs.\n+\n+ Source:\n+ https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation'''\n+ if not docstring:\n+ return ''\n+ # Convert tabs to spaces (following the normal Python rules)\n+ # and split into a list of lines:\n+ lines = docstring.expandtabs().splitlines()\n+ # Determine minimum indentation (first line doesn't count):\n+ indent = sys.maxsize\n+ for line in lines[1:]:\n+ stripped = line.lstrip()\n+ if stripped:\n+ indent = min(indent, len(line) - len(stripped))\n+ # Remove indentation (first line is special):\n+ trimmed = [lines[0].strip()]\n+ if indent < sys.maxsize:\n+ for line in lines[1:]:\n+ trimmed.append(line[indent:].rstrip())\n+ # Strip off trailing and leading blank lines:\n+ while trimmed and not trimmed[-1]:\n+ trimmed.pop()\n+ while trimmed and not trimmed[0]:\n+ trimmed.pop(0)\n+ # Return a single string:\n+ return '\\n'.join(trimmed)\n"
}
] |
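For reference, a small sketch of what the relocated trim() helper does to an indented schelp string; the sample text is lifted from the 'region' record help in the schema.py diff above.

    from siliconcompiler import utils

    schelp = """
        Recommended naming methodology:

        * local: node is the local machine
        * onprem: node in on-premises IT infrastructure
        """

    # Common leading indentation and the blank edge lines are stripped, but the
    # internal newlines and the blank line before the bullets survive, so the
    # schemagen extension (which now feeds the text to Sphinx line by line)
    # can still parse the bullet list as RST.
    print(utils.trim(schelp))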
7c63859ebda3a519db318f3105862d46621e36cd | siliconcompiler/siliconcompiler | 26.05.2022 17:08:29 | Apache License 2.0 | Make _print_tcl() more robust
- Use {} to escape strings, so that we can have spaces and newlines
- Dump tuples as TCL lists to avoid parsing issues on the TCL side
- Make sure envvar syntax gets substituted anywhere in the string | [
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/core.py",
"new_path": "siliconcompiler/core.py",
"diff": "@@ -1472,6 +1472,38 @@ class Chip:\n else:\n fout.write(f\"{keypath},{value}\")\n \n+ ###########################################################################\n+ def _escape_val_tcl(self, val, typestr):\n+ '''Recursive helper function for converting Python values to safe TCL\n+ values, based on the SC type string.'''\n+ if val is None:\n+ return ''\n+ elif typestr.startswith('('):\n+ subtypes = typestr.strip('()').split(',')\n+ valstr = ' '.join(self._escape_val_tcl(v, subtype.strip())\n+ for v, subtype in zip(val, subtypes))\n+ return f'[list {valstr}]'\n+ elif typestr.startswith('['):\n+ subtype = typestr.strip('[]')\n+ valstr = ' '.join(self._escape_val_tcl(v, subtype) for v in val)\n+ return f'[list {valstr}]'\n+ elif typestr == 'bool':\n+ return 'true' if val else 'false'\n+ elif typestr == 'str':\n+ # Surrounding a string with '{}' ensures that special characters\n+ # inside the string are treated as-is.\n+ # Source: https://datacadamia.com/lang/tcl/special_character#about\n+ escaped_val = val.replace('{', '\\\\{').replace('}', '\\\\}')\n+ return '{' + escaped_val + '}'\n+ elif typestr in ('file', 'dir'):\n+ # Replace $VAR with $env(VAR) for tcl\n+ # Surround with quotes to escape whitespace. We don't want to use {}\n+ # here, since that will break envvar expansion.\n+ return '\"' + re.sub(r'\\$(\\w+)', r'$env(\\1)', val) + '\"'\n+ else:\n+ # floats/ints just become strings\n+ return str(val)\n+\n ###########################################################################\n def _print_tcl(self, cfg, fout=None, prefix=\"\"):\n '''\n@@ -1487,29 +1519,17 @@ class Chip:\n for key in allkeys:\n typestr = self.get(*key, cfg=cfg, field='type')\n value = self.get(*key, cfg=cfg)\n- # everything becomes a list\n- # convert None to empty list\n- if value is None:\n- alist = []\n- elif bool(re.match(r'\\[', typestr)):\n- alist = value\n- elif typestr == \"bool\" and value:\n- alist = [\"true\"]\n- elif typestr == \"bool\" and not value:\n- alist = [\"false\"]\n- else:\n- alist = [value]\n-\n- #replace $VAR with env(VAR) for tcl\n- for i, val in enumerate(alist):\n- m = re.match(r'\\$(\\w+)(.*)', str(val))\n- if m:\n- alist[i] = ('$env(' + m.group(1) + ')' + m.group(2))\n \n #create a TCL dict\n keystr = ' '.join(key)\n- valstr = ' '.join(map(str, alist)).replace(';', '\\\\;')\n- outstr = f\"{prefix} {keystr} [list {valstr}]\\n\"\n+\n+ valstr = self._escape_val_tcl(value, typestr)\n+\n+ if not (typestr.startswith('[') or typestr.startswith('(')):\n+ # treat scalars as lists as well\n+ valstr = f'[list {valstr}]'\n+\n+ outstr = f\"{prefix} {keystr} {valstr}\\n\"\n \n #print out all non default values\n if 'default' not in key:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/core/test_write_manifest.py",
"new_path": "tests/core/test_write_manifest.py",
"diff": "@@ -1,4 +1,6 @@\n # Copyright 2020 Silicon Compiler Authors. All Rights Reserved.\n+import pytest\n+\n import siliconcompiler\n \n def test_write_manifest():\n@@ -15,6 +17,55 @@ def test_write_manifest():\n chip.write_manifest('top.tcl', prune=False)\n chip.write_manifest('top.yaml')\n \n+def test_advanced_tcl(monkeypatch):\n+ # Tkinter module is part of Python standard library, but may not be\n+ # available depending on if the system has the python3-tk installed. This\n+ # line will import tkinter if it's available, and skip the test otherwise.\n+ tkinter = pytest.importorskip('tkinter')\n+\n+ chip = siliconcompiler.Chip('top')\n+\n+ # Test complex strings\n+ desc = '''This description is potentially problematic since it includes\n+multiple lines, spaces, and TCL special characters. This package costs $5 {for real}!'''\n+ chip.set('package', 'description', desc)\n+\n+ # Test tuples\n+ chip.add('asic', 'diearea', (0, 0))\n+ chip.add('asic', 'diearea', (30, 40))\n+\n+ # Test bools\n+ chip.set('option', 'quiet', True)\n+\n+ # Test envvars\n+ chip.set('input', 'verilog', 'rtl/$TOPMOD.v')\n+\n+ chip.write_manifest('top.tcl')\n+\n+ # Read from config in TCL as test\n+ tcl = tkinter.Tcl()\n+\n+ # Set env var to test ['input', 'verilog']\n+ monkeypatch.setenv('TOPMOD', 'design')\n+\n+ def tcl_eval(expr):\n+ script = f'''\n+ source top.tcl\n+ return {expr}'''\n+ return tcl.eval(script)\n+\n+ # When we call puts on a multiline string, it does get mangled a bit. It\n+ # stays surrounded by {}, and {} within the string are still escaped.\n+ # TODO: is this problematic? I think it is okay for now since we don't\n+ # really read these strings within TCL, they just need to not break sourcing\n+ # the manifest.\n+ expected_desc = '{' + desc.replace('{', '\\\\{').replace('}', '\\\\}') + '}'\n+ assert tcl_eval('[dict get $sc_cfg package description]') == expected_desc\n+\n+ assert tcl_eval('[lindex [lindex [dict get $sc_cfg asic diearea] 1] 0]') == '30.0'\n+ assert tcl_eval('[dict get $sc_cfg option quiet]') == 'true'\n+ assert tcl_eval('[dict get $sc_cfg input verilog]') == 'rtl/design.v'\n+\n #########################\n if __name__ == \"__main__\":\n test_write_manifest()\n"
}
] |
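Condensed from the new test above, a hedged sketch of the round trip this change is meant to survive; it assumes the same schema keypaths as the test and needs the python3-tk package for the read-back step.

    import tkinter
    import siliconcompiler

    chip = siliconcompiler.Chip('top')
    chip.set('option', 'quiet', True)              # bool -> true/false
    chip.add('asic', 'diearea', (30, 40))          # tuple -> TCL list
    chip.set('input', 'verilog', 'rtl/$TOPMOD.v')  # $VAR -> $env(VAR)
    chip.write_manifest('top.tcl')

    tcl = tkinter.Tcl()
    tcl.eval('set env(TOPMOD) design')  # must be set before the manifest is sourced
    tcl.eval('source top.tcl')
    print(tcl.eval('dict get $sc_cfg option quiet'))   # -> true
    print(tcl.eval('dict get $sc_cfg input verilog'))  # -> rtl/design.v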
01ebf8a093f06eaee4bccd58943c1d8f072f2418 | siliconcompiler/siliconcompiler | 31.05.2022 15:29:30 | Apache License 2.0 | Tweak string escape approach
Use "" instead of {} to remove persistent backslashes in front of curly
braces in the string. | [
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/core.py",
"new_path": "siliconcompiler/core.py",
"diff": "@@ -1479,27 +1479,37 @@ class Chip:\n if val is None:\n return ''\n elif typestr.startswith('('):\n+ # Recurse into each item of tuple\n subtypes = typestr.strip('()').split(',')\n valstr = ' '.join(self._escape_val_tcl(v, subtype.strip())\n for v, subtype in zip(val, subtypes))\n return f'[list {valstr}]'\n elif typestr.startswith('['):\n+ # Recurse into each item of list\n subtype = typestr.strip('[]')\n valstr = ' '.join(self._escape_val_tcl(v, subtype) for v in val)\n return f'[list {valstr}]'\n elif typestr == 'bool':\n return 'true' if val else 'false'\n elif typestr == 'str':\n- # Surrounding a string with '{}' ensures that special characters\n- # inside the string are treated as-is.\n- # Source: https://datacadamia.com/lang/tcl/special_character#about\n- escaped_val = val.replace('{', '\\\\{').replace('}', '\\\\}')\n- return '{' + escaped_val + '}'\n+ # Escape string by surrounding it with \"\" and escaping the few\n+ # special characters that still get considered inside \"\". We don't\n+ # use {}, since this requires adding permanent backslashes to any\n+ # curly braces inside the string.\n+ # Source: https://www.tcl.tk/man/tcl8.4/TclCmd/Tcl.html (section [4] on)\n+ escaped_val = (val.replace('\\\\', '\\\\\\\\') # escape '\\' to avoid backslash substition (do this first, since other replaces insert '\\')\n+ .replace('[', '\\\\[') # escape '[' to avoid command substition\n+ .replace('$', '\\\\$') # escape '$' to avoid variable substition\n+ .replace('\"', '\\\\\"')) # escape '\"' to avoid string terminating early\n+ return '\"' + escaped_val + '\"'\n elif typestr in ('file', 'dir'):\n # Replace $VAR with $env(VAR) for tcl\n- # Surround with quotes to escape whitespace. We don't want to use {}\n- # here, since that will break envvar expansion.\n- return '\"' + re.sub(r'\\$(\\w+)', r'$env(\\1)', val) + '\"'\n+ val = re.sub(r'\\$(\\w+)', r'$env(\\1)', val)\n+ # Same escapes as applied to string, minus $ (since we want to resolve env vars).\n+ escaped_val = (val.replace('\\\\', '\\\\\\\\') # escape '\\' to avoid backslash substition (do this first, since other replaces insert '\\')\n+ .replace('[', '\\\\[') # escape '[' to avoid command substition\n+ .replace('\"', '\\\\\"')) # escape '\"' to avoid string terminating early\n+ return '\"' + escaped_val + '\"'\n else:\n # floats/ints just become strings\n return str(val)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/core/test_write_manifest.py",
"new_path": "tests/core/test_write_manifest.py",
"diff": "@@ -19,8 +19,9 @@ def test_write_manifest():\n \n def test_advanced_tcl(monkeypatch):\n # Tkinter module is part of Python standard library, but may not be\n- # available depending on if the system has the python3-tk installed. This\n- # line will import tkinter if it's available, and skip the test otherwise.\n+ # available depending on if the system has the python3-tk package installed.\n+ # This line will import tkinter if it's available, and skip the test\n+ # otherwise.\n tkinter = pytest.importorskip('tkinter')\n \n chip = siliconcompiler.Chip('top')\n@@ -54,12 +55,8 @@ multiple lines, spaces, and TCL special characters. This package costs $5 {for r\n return {expr}'''\n return tcl.eval(script)\n \n- # When we call puts on a multiline string, it does get mangled a bit. It\n- # stays surrounded by {}, and {} within the string are still escaped.\n- # TODO: is this problematic? I think it is okay for now since we don't\n- # really read these strings within TCL, they just need to not break sourcing\n- # the manifest.\n- expected_desc = '{' + desc.replace('{', '\\\\{').replace('}', '\\\\}') + '}'\n+ # When the TCL shell displays a multiline string, it gets surrounded in {}.\n+ expected_desc = '{' + desc + '}'\n assert tcl_eval('[dict get $sc_cfg package description]') == expected_desc\n \n assert tcl_eval('[lindex [lindex [dict get $sc_cfg asic diearea] 1] 0]') == '30.0'\n"
}
] |
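A tiny illustration of the new escaping rule for plain strings, mirroring the replace chain in the diff above: only backslash, '[', '$' and double quotes are escaped, so curly braces pass through untouched, which is what removes the persistent backslashes.

    val = 'This costs $5 {for real} and uses [commands] and "quotes"'
    escaped = (val.replace('\\', '\\\\')   # backslashes first, so later escapes survive
                  .replace('[', '\\[')     # avoid command substitution
                  .replace('$', '\\$')     # avoid variable substitution
                  .replace('"', '\\"'))    # avoid terminating the string early
    print('"' + escaped + '"')
    # "This costs \$5 {for real} and uses \[commands] and \"quotes\""

For file and dir values the '$' escape is skipped, so environment variables still resolve.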
579c9d4deca09dbb7a615bfb98c7678723b2aefd | siliconcompiler/siliconcompiler | 21.06.2022 13:48:24 | Apache License 2.0 | Make tool regex set errors/warnings metrics
- "errors" and "warnings" suffixes are special cases
- Also update docs for relevant schema param and check_logfile() | [
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/core.py",
"new_path": "siliconcompiler/core.py",
"diff": "@@ -2909,17 +2909,22 @@ class Chip:\n '''\n Checks logfile for patterns found in the 'regex' parameter.\n \n- Reads the content of the step's log file and compares the\n- content found in step 'regex' parameter. The matches are\n- stored in the file 'reports/<design>.<suffix>' in the run directory.\n- The matches are printed to STDOUT if display is set to True.\n+ Reads the content of the tasks's log file and compares the content found\n+ in the task's 'regex' parameter. The matches are stored in the file\n+ '<design>.<suffix>' in the current directory. The matches are printed to\n+ STDOUT if display is set to True.\n \n Args:\n- step (str): Task step name ('syn', 'place', etc)\n jobname (str): Jobid directory name\n+ step (str): Task step name ('syn', 'place', etc)\n index (str): Task index\n+ logfile (str): Path to logfile. If None, {step}.log is used.\n display (bool): If True, printes matches to STDOUT.\n \n+ Returns:\n+ Dictionary mapping suffixes to number of matches for that suffix's\n+ regex.\n+\n Examples:\n >>> chip.check_logfile('place')\n Searches for regex matches in the place logfile.\n@@ -2943,6 +2948,7 @@ class Chip:\n # Creating local dictionary (for speed)\n # self.get is slow\n checks = {}\n+ matches = {}\n regex_list = []\n if self.valid('tool', tool, 'regex', step, index, 'default'):\n regex_list = self.getkeys('tool', tool, 'regex', step, index)\n@@ -2950,6 +2956,7 @@ class Chip:\n checks[suffix] = {}\n checks[suffix]['report'] = open(f\"{step}.{suffix}\", \"w\")\n checks[suffix]['args'] = self.get('tool', tool, 'regex', step, index, suffix)\n+ matches[suffix] = 0\n \n # Looping through patterns for each line\n with open(logfile) as f:\n@@ -2962,12 +2969,15 @@ class Chip:\n else:\n string = self.grep(item, string)\n if string is not None:\n+ matches[suffix] += 1\n #always print to file\n print(string.strip(), file=checks[suffix]['report'])\n #selectively print to display\n if display:\n self.logger.info(string.strip())\n \n+ return matches\n+\n ###########################################################################\n def _find_leaves(self, steplist):\n '''Helper to find final (leaf) tasks for a given steplist.'''\n@@ -4001,7 +4011,11 @@ class Chip:\n ##################\n # 18. Check log file (must be after post-process)\n if (tool not in self.builtin) and (not self.get('option', 'skipall')) :\n- self.check_logfile(step=step, index=index, display=not quiet)\n+ matches = self.check_logfile(step=step, index=index, display=not quiet)\n+ if 'errors' in matches:\n+ self.set('metric', step, index, 'errors', matches['errors'])\n+ if 'warnings' in matches:\n+ self.set('metric', step, index, 'warnings', matches['warnings'])\n \n ##################\n # 19. Hash files\n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/schema.py",
"new_path": "siliconcompiler/schema.py",
"diff": "@@ -1380,8 +1380,8 @@ def schema_tool(cfg, tool='default', step='default', index='default'):\n shorthelp=\"Tool: regex filter\",\n switch=\"-tool_regex 'tool step index suffix <str>'\",\n example=[\n- \"cli: -tool_regex 'openroad place 0 error -v ERROR'\",\n- \"api: chip.set('tool','openroad','regex','place','0','error','-v ERROR')\"],\n+ \"cli: -tool_regex 'openroad place 0 errors -v ERROR'\",\n+ \"api: chip.set('tool','openroad','regex','place','0','errors','-v ERROR')\"],\n schelp=\"\"\"\n A list of piped together grep commands. Each entry represents a set\n of command line arguments for grep including the regex pattern to\n@@ -1400,6 +1400,10 @@ def schema_tool(cfg, tool='default', step='default', index='default'):\n SiliconCompiler::\n \n chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings', [\"WARNING\", \"-v bbox\"])\n+\n+ The \"errors\" and \"warnings\" suffixes are special cases. When set,\n+ the number of matches found for these regexes will be used to set\n+ the errors and warnings metrics for the task, respectively.\n \"\"\")\n \n \n"
},
{
"change_type": "MODIFY",
"old_path": "tests/core/data/defaults.json",
"new_path": "tests/core/data/defaults.json",
"diff": "@@ -6219,10 +6219,10 @@\n \"default\": {\n \"defvalue\": [],\n \"example\": [\n- \"cli: -tool_regex 'openroad place 0 error -v ERROR'\",\n- \"api: chip.set('tool','openroad','regex','place','0','error','-v ERROR')\"\n+ \"cli: -tool_regex 'openroad place 0 errors -v ERROR'\",\n+ \"api: chip.set('tool','openroad','regex','place','0','errors','-v ERROR')\"\n ],\n- \"help\": \"A list of piped together grep commands. Each entry represents a set\\nof command line arguments for grep including the regex pattern to\\nmatch. Starting with the first list entry, each grep output is piped\\ninto the following grep command in the list. Supported grep options\\ninclude ``-v`` and ``-e``. Patterns starting with \\\"-\\\" should be\\ndirectly preceeded by the ``-e`` option. The following example\\nillustrates the concept.\\n\\nUNIX grep:\\n\\n.. code-block:: bash\\n\\n $ grep WARNING place.log | grep -v \\\"bbox\\\" > place.warnings\\n\\nSiliconCompiler::\\n\\n chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings', [\\\"WARNING\\\", \\\"-v bbox\\\"])\",\n+ \"help\": \"A list of piped together grep commands. Each entry represents a set\\nof command line arguments for grep including the regex pattern to\\nmatch. Starting with the first list entry, each grep output is piped\\ninto the following grep command in the list. Supported grep options\\ninclude ``-v`` and ``-e``. Patterns starting with \\\"-\\\" should be\\ndirectly preceeded by the ``-e`` option. The following example\\nillustrates the concept.\\n\\nUNIX grep:\\n\\n.. code-block:: bash\\n\\n $ grep WARNING place.log | grep -v \\\"bbox\\\" > place.warnings\\n\\nSiliconCompiler::\\n\\n chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings', [\\\"WARNING\\\", \\\"-v bbox\\\"])\\n\\nThe \\\"errors\\\" and \\\"warnings\\\" suffixes are special cases. When set,\\nthe number of matches found for these regexes will be used to set\\nthe errors and warnings metrics for the task, respectively.\",\n \"lock\": \"false\",\n \"notes\": null,\n \"require\": null,\n"
}
] |
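A hedged usage sketch of the special-case suffixes described above; the openroad place/0 task and the grep-style patterns are illustrative only.

    import siliconcompiler

    chip = siliconcompiler.Chip('top')
    # Count ERROR lines, plus WARNING lines that are not about bounding boxes.
    chip.set('tool', 'openroad', 'regex', 'place', '0', 'errors', ['ERROR'])
    chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings',
             ['WARNING', '-v bbox'])

    # During run(), check_logfile() returns the per-suffix match counts and the
    # two special suffixes are written into the task metrics, roughly:
    #   matches = chip.check_logfile(step='place', index='0')
    #   chip.set('metric', 'place', '0', 'errors', matches['errors'])
    #   chip.set('metric', 'place', '0', 'warnings', matches['warnings'])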
400e0b0e4fcb661888b8498ea3ff310c386aa75a | siliconcompiler/siliconcompiler | 21.06.2022 18:40:01 | Apache License 2.0 | Make regex errors/warnings additive, add reports
- Enables tools to extract additional errors/warnings from other logs
- Add logfiles under reports to reduce boilerplate; the logfiles always
belong there if a regex is used | [
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/client.py",
"new_path": "siliconcompiler/client.py",
"diff": "@@ -54,11 +54,8 @@ def remote_preprocess(chip):\n for index in indexlist:\n tool = chip.get('flowgraph', flow, local_step, index, 'tool')\n # Setting up tool is optional (step may be a builtin function)\n- if tool:\n- chip.set('arg', 'step', local_step)\n- chip.set('arg', 'index', index)\n- func = chip.find_function(tool, 'setup', 'tools')\n- func(chip)\n+ if tool and tool not in chip.builtin:\n+ chip._setup_tool(tool, local_step, index)\n \n # Need to override steplist here to make sure check_manifest() doesn't\n # check steps that haven't been setup.\n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/core.py",
"new_path": "siliconcompiler/core.py",
"diff": "@@ -4018,9 +4018,13 @@ class Chip:\n if (tool not in self.builtin) and (not self.get('option', 'skipall')) :\n matches = self.check_logfile(step=step, index=index, display=not quiet)\n if 'errors' in matches:\n- self.set('metric', step, index, 'errors', matches['errors'])\n+ errors = self.get('metric', step, index, 'errors')\n+ errors += matches['errors']\n+ self.set('metric', step, index, 'errors', errors)\n if 'warnings' in matches:\n- self.set('metric', step, index, 'warnings', matches['warnings'])\n+ warnings = self.get('metric', step, index, 'warnings')\n+ warnings += matches['warnings']\n+ self.set('metric', step, index, 'warnings', warnings)\n \n ##################\n # 19. Hash files\n@@ -4104,6 +4108,36 @@ class Chip:\n else:\n os.remove(path)\n \n+ ###########################################################################\n+ def _setup_tool(self, tool, step, index):\n+ self.set('arg','step', step)\n+ self.set('arg','index', index)\n+\n+ func = self.find_function(tool, 'setup', 'tools')\n+ if func is None:\n+ self.logger.error(f'setup() not found for tool {tool}')\n+ sys.exit(1)\n+ func(self)\n+\n+ re_keys = self.getkeys('tool', tool, 'regex', step, index)\n+ logfile = f'{step}.log'\n+ if (\n+ 'errors' in re_keys and\n+ logfile not in self.get('tool', tool, 'report', step, index, 'errors')\n+ ):\n+ self.add('tool', tool, 'report', step, index, 'errors', logfile)\n+\n+ if (\n+ 'warnings' in re_keys and\n+ logfile not in self.get('tool', tool, 'report', step, index, 'warnings')\n+ ):\n+ self.add('tool', tool, 'report', step, index, 'warnings', logfile)\n+\n+ # Need to clear index, otherwise we will skip setting up other indices.\n+ # Clear step for good measure.\n+ self.set('arg','step', None)\n+ self.set('arg','index', None)\n+\n ###########################################################################\n def run(self):\n '''\n@@ -4287,18 +4321,7 @@ class Chip:\n # Setting up tool is optional\n tool = self.get('flowgraph', flow, step, index, 'tool')\n if tool not in self.builtin:\n- self.set('arg','step', step)\n- self.set('arg','index', index)\n- func = self.find_function(tool, 'setup', 'tools')\n- if func is None:\n- self.logger.error(f'setup() not found for tool {tool}')\n- sys.exit(1)\n- func(self)\n- # Need to clear index, otherwise we will skip\n- # setting up other indices. Clear step for good\n- # measure.\n- self.set('arg','step', None)\n- self.set('arg','index', None)\n+ self._setup_tool(tool, step, index)\n \n # Implement auto-update of jobincrement\n try:\n"
},
{
"change_type": "MODIFY",
"old_path": "siliconcompiler/schema.py",
"new_path": "siliconcompiler/schema.py",
"diff": "@@ -1402,9 +1402,10 @@ def schema_tool(cfg, tool='default', step='default', index='default'):\n chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings', [\"WARNING\", \"-v bbox\"])\n \n The \"errors\" and \"warnings\" suffixes are special cases. When set,\n- the number of matches found for these regexes will be used to set\n- the errors and warnings metrics for the task, respectively.\n- \"\"\")\n+ the number of matches found for these regexes will be added to the\n+ errors and warnings metrics for the task, respectively. This will\n+ also cause the logfile to be added to the :keypath:`tool, <tool>,\n+ report` parameter for those metrics, if not already present.\"\"\")\n \n \n scparam(cfg, ['tool', tool, 'option', step, index],\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/core/data/defaults.json",
"new_path": "tests/core/data/defaults.json",
"diff": "@@ -6222,7 +6222,7 @@\n \"cli: -tool_regex 'openroad place 0 errors -v ERROR'\",\n \"api: chip.set('tool','openroad','regex','place','0','errors','-v ERROR')\"\n ],\n- \"help\": \"A list of piped together grep commands. Each entry represents a set\\nof command line arguments for grep including the regex pattern to\\nmatch. Starting with the first list entry, each grep output is piped\\ninto the following grep command in the list. Supported grep options\\ninclude ``-v`` and ``-e``. Patterns starting with \\\"-\\\" should be\\ndirectly preceeded by the ``-e`` option. The following example\\nillustrates the concept.\\n\\nUNIX grep:\\n\\n.. code-block:: bash\\n\\n $ grep WARNING place.log | grep -v \\\"bbox\\\" > place.warnings\\n\\nSiliconCompiler::\\n\\n chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings', [\\\"WARNING\\\", \\\"-v bbox\\\"])\\n\\nThe \\\"errors\\\" and \\\"warnings\\\" suffixes are special cases. When set,\\nthe number of matches found for these regexes will be used to set\\nthe errors and warnings metrics for the task, respectively.\",\n+ \"help\": \"A list of piped together grep commands. Each entry represents a set\\nof command line arguments for grep including the regex pattern to\\nmatch. Starting with the first list entry, each grep output is piped\\ninto the following grep command in the list. Supported grep options\\ninclude ``-v`` and ``-e``. Patterns starting with \\\"-\\\" should be\\ndirectly preceeded by the ``-e`` option. The following example\\nillustrates the concept.\\n\\nUNIX grep:\\n\\n.. code-block:: bash\\n\\n $ grep WARNING place.log | grep -v \\\"bbox\\\" > place.warnings\\n\\nSiliconCompiler::\\n\\n chip.set('tool', 'openroad', 'regex', 'place', '0', 'warnings', [\\\"WARNING\\\", \\\"-v bbox\\\"])\\n\\nThe \\\"errors\\\" and \\\"warnings\\\" suffixes are special cases. When set,\\nthe number of matches found for these regexes will be added to the\\nerrors and warnings metrics for the task, respectively. This will\\nalso cause the logfile to be added to the :keypath:`tool, <tool>,\\nreport` parameter for those metrics, if not already present.\",\n \"lock\": \"false\",\n \"notes\": null,\n \"require\": null,\n"
}
] |
d635433ebc9648a09beb499488c077fa87032efd | astropy/astroquery | 22.04.2022 17:23:32 | BSD 3-Clause New or Revised License | Add a spectral cutout example
Also include support for numpy arrays for band and channel, and automatically reorder band and channel values into ascending order | [
{
"change_type": "MODIFY",
"old_path": "astroquery/casda/core.py",
"new_path": "astroquery/casda/core.py",
"diff": "@@ -14,6 +14,7 @@ import astropy.coordinates as coord\n from astropy.table import Table\n from astropy.io.votable import parse\n from astroquery import log\n+import numpy as np\n \n # 3. local imports - use relative imports\n # commonly required local imports shown below as example\n@@ -125,7 +126,7 @@ class CasdaClass(BaseQuery):\n if channel is not None:\n raise ValueError(\"Either 'channel' or 'band' values may be provided but not both.\")\n \n- if (not isinstance(band, (list, tuple))) or len(band) != 2 or \\\n+ if (not isinstance(band, (list, tuple, np.ndarray))) or len(band) != 2 or \\\n (band[0] is not None and not isinstance(band[0], u.Quantity)) or \\\n (band[1] is not None and not isinstance(band[1], u.Quantity)):\n raise ValueError(\"The 'band' value must be a list of 2 wavelength or frequency values.\")\n@@ -137,22 +138,31 @@ class CasdaClass(BaseQuery):\n if bandBoundedLow or bandBoundedHigh:\n unit = band[0].unit if bandBoundedLow else band[1].unit\n if unit.physical_type == 'length':\n- min_band = '-Inf' if not bandBoundedLow else str(band[0].to(u.m).value)\n- max_band = '+Inf' if not bandBoundedHigh else str(band[1].to(u.m).value)\n+ min_band = '-Inf' if not bandBoundedLow else band[0].to(u.m).value\n+ max_band = '+Inf' if not bandBoundedHigh else band[1].to(u.m).value\n elif unit.physical_type == 'frequency':\n # Swap the order when changing frequency to wavelength\n- min_band = '-Inf' if not bandBoundedHigh else str(band[1].to(u.m, equivalencies=u.spectral()).value)\n- max_band = '+Inf' if not bandBoundedLow else str(band[0].to(u.m, equivalencies=u.spectral()).value)\n+ min_band = '-Inf' if not bandBoundedHigh else band[1].to(u.m, equivalencies=u.spectral()).value\n+ max_band = '+Inf' if not bandBoundedLow else band[0].to(u.m, equivalencies=u.spectral()).value\n else:\n raise ValueError(\"The 'band' values must be wavelengths or frequencies.\")\n+ # If values were provided in the wrong order, swap them\n+ if bandBoundedLow and bandBoundedHigh and min_band > max_band:\n+ temp_val = min_band\n+ min_band = max_band\n+ max_band = temp_val\n \n request_payload['BAND'] = f'{min_band} {max_band}'\n \n if channel is not None:\n- if not isinstance(channel, (list, tuple)) or len(channel) != 2 or \\\n- not isinstance(channel[0], int) or not isinstance(channel[1], int):\n+ if not isinstance(channel, (list, tuple, np.ndarray)) or len(channel) != 2 or \\\n+ not isinstance(channel[0], (int, np.integer)) or not isinstance(channel[1], (int, np.integer)):\n raise ValueError(\"The 'channel' value must be a list of 2 integer values.\")\n- request_payload['CHANNEL'] = f'{channel[0]} {channel[1]}'\n+ if channel[0] <= channel[1]:\n+ request_payload['CHANNEL'] = f'{channel[0]} {channel[1]}'\n+ else:\n+ # If values were provided in the wrong order, swap them\n+ request_payload['CHANNEL'] = f'{channel[1]} {channel[0]}'\n \n return request_payload\n \n@@ -310,7 +320,8 @@ class CasdaClass(BaseQuery):\n \n job_url = self._create_job(table, 'cutout_service', verbose)\n \n- cutout_spec = self._args_to_payload(radius=radius, **kwargs)\n+ cutout_spec = self._args_to_payload(radius=radius, coordinates=coordinates, height=height, width=width, \n+ band=band, channel=channel, verbose=verbose)\n \n if not cutout_spec:\n raise ValueError(\"Please provide cutout parameters such as coordinates, band or channel.\")\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/casda/tests/test_casda.py",
"new_path": "astroquery/casda/tests/test_casda.py",
"diff": "@@ -12,6 +12,7 @@ import astropy.units as u\n from astropy.table import Table, Column\n from astropy.io.votable import parse\n from astroquery import log\n+import numpy as np\n \n from astroquery.casda import Casda\n \n@@ -348,6 +349,10 @@ def test_args_to_payload_band():\n assert payload['BAND'] == '0.195 0.215'\n assert list(payload.keys()) == ['BAND']\n \n+ payload = casda._args_to_payload(band=(0.215*u.m, 0.195*u.m))\n+ assert payload['BAND'] == '0.195 0.215'\n+ assert list(payload.keys()) == ['BAND']\n+\n payload = casda._args_to_payload(band=(0.195*u.m, 21.5*u.cm))\n assert payload['BAND'] == '0.195 0.215'\n assert list(payload.keys()) == ['BAND']\n@@ -364,6 +369,10 @@ def test_args_to_payload_band():\n assert payload['BAND'] == '0.19986163866666667 0.21112144929577467'\n assert list(payload.keys()) == ['BAND']\n \n+ payload = casda._args_to_payload(band=np.array([1.5, 1.42])*u.GHz)\n+ assert payload['BAND'] == '0.19986163866666667 0.21112144929577467'\n+ assert list(payload.keys()) == ['BAND']\n+\n payload = casda._args_to_payload(band=(None, 1.5*u.GHz))\n assert payload['BAND'] == '0.19986163866666667 +Inf'\n assert list(payload.keys()) == ['BAND']\n@@ -393,7 +402,7 @@ def test_args_to_payload_band_invalid():\n assert \"The 'band' values must have the same kind of units.\" in str(excinfo.value)\n \n with pytest.raises(ValueError) as excinfo:\n- casda._args_to_payload(band=(1.42*u.radian, 21*u.deg))\n+ casda._args_to_payload(band=[1.42*u.radian, 21*u.deg])\n assert \"The 'band' values must be wavelengths or frequencies.\" in str(excinfo.value)\n \n with pytest.raises(ValueError) as excinfo:\n@@ -407,7 +416,11 @@ def test_args_to_payload_channel():\n assert payload['CHANNEL'] == '0 30'\n assert list(payload.keys()) == ['CHANNEL']\n \n- payload = casda._args_to_payload(channel=(17, 23))\n+ payload = casda._args_to_payload(channel=np.array([17, 23]))\n+ assert payload['CHANNEL'] == '17 23'\n+ assert list(payload.keys()) == ['CHANNEL']\n+\n+ payload = casda._args_to_payload(channel=(23, 17))\n assert payload['CHANNEL'] == '17 23'\n assert list(payload.keys()) == ['CHANNEL']\n \n"
},
{
"change_type": "MODIFY",
"old_path": "docs/casda/casda.rst",
"new_path": "docs/casda/casda.rst",
"diff": "@@ -131,7 +131,7 @@ Spatial and spectral parameters can be combined to produce sub-cubes.\n \n Once completed, the cutouts can be downloaded as described in the section above.\n \n-An example script to download a cutout from the Rapid ASKAP Continuum Survey (RACS) at a specified position is shown\n+An example script to download a 2D cutout from the Rapid ASKAP Continuum Survey (RACS) at a specified position is shown\n below:\n \n .. doctest-skip::\n@@ -152,6 +152,26 @@ below:\n >>> url_list = casda.cutout(subset[:1], coordinates=centre, radius=14*u.arcmin)\n >>> filelist = casda.download_files(url_list, savedir='/tmp')\n \n+An example script to download a 3D cutout from the WALLABY Pre-Pilot Eridanus cube at a specified position and velocity\n+is shown below:\n+\n+.. doctest-skip::\n+\n+ >>> from astropy import coordinates, units as u, wcs\n+ >>> from astroquery.casda import Casda\n+ >>> import getpass\n+ >>> centre = coordinates.SkyCoord.from_name('NGC 1371')\n+ >>> username = 'email@somewhere.edu.au'\n+ >>> password = getpass.getpass(str(\"Enter your OPAL password: \"))\n+ >>> casda = Casda(username, password)\n+ >>> result = Casda.query_region(centre, radius=30*u.arcmin)\n+ >>> public_data = Casda.filter_out_unreleased(result)\n+ >>> eridanus_cube = public_data[public_data['filename'] == 'Eridanus_full_image_V3.fits']\n+ >>> vel = np.array([1250, 1600])*u.km/u.s\n+ >>> freq = vel.to(u.Hz, equivalencies=u.doppler_radio(1.420405751786*u.GHz))\n+ >>> url_list = casda.cutout(eridanus_cube, coordinates=centre, radius=9*u.arcmin, band=freq)\n+ >>> filelist = casda.download_files(url_list, savedir='/tmp')\n+\n \n Reference/API\n =============\n"
}
] |
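The documentation example added above, condensed into a hedged sketch of the new band handling; the OPAL credentials are placeholders, a network connection is required, and reversed or numpy-array band values are now accepted and reordered.

    import numpy as np
    from astropy import coordinates, units as u
    from astroquery.casda import Casda

    centre = coordinates.SkyCoord.from_name('NGC 1371')
    casda = Casda('email@somewhere.edu.au', 'OPAL password')  # placeholder credentials

    result = Casda.query_region(centre, radius=30 * u.arcmin)
    public_data = Casda.filter_out_unreleased(result)
    cube = public_data[public_data['filename'] == 'Eridanus_full_image_V3.fits']

    # Velocities may be given as a numpy array and in either order; they are
    # converted to frequencies and the cutout service swaps the bounds if needed.
    vel = np.array([1600, 1250]) * u.km / u.s
    freq = vel.to(u.Hz, equivalencies=u.doppler_radio(1.420405751786 * u.GHz))
    url_list = casda.cutout(cube, coordinates=centre, radius=9 * u.arcmin, band=freq)
    files = casda.download_files(url_list, savedir='/tmp')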
31cedeada50149581f117e425c3191af8b6f0b97 | astropy/astroquery | 20.06.2022 19:30:54 | BSD 3-Clause New or Revised License | Validate `svo_fps` query parameter names locally
`SvoFpsClass.data_from_svo()` now checks the names of the query
parameters and only connects with the server if all names are valid. | [
{
"change_type": "MODIFY",
"old_path": "astroquery/svo_fps/core.py",
"new_path": "astroquery/svo_fps/core.py",
"diff": "@@ -9,11 +9,23 @@ from astropy.io.votable import parse_single_table\n from . import conf\n \n from ..query import BaseQuery\n+from astroquery.exceptions import InvalidQueryError\n+\n \n __all__ = ['SvoFpsClass', 'SvoFps']\n \n FLOAT_MAX = np.finfo(np.float64).max\n \n+# Valid query parameters taken from\n+# http://svo2.cab.inta-csic.es/theory/fps/index.php?mode=voservice\n+_params_with_range = {\"WavelengthRef\", \"WavelengthMean\", \"WavelengthEff\",\n+ \"WavelengthMin\", \"WavelengthMax\", \"WidthEff\", \"FWHM\"}\n+QUERY_PARAMETERS = _params_with_range.copy()\n+for suffix in (\"_min\", \"_max\"):\n+ QUERY_PARAMETERS.update(param + suffix for param in _params_with_range)\n+QUERY_PARAMETERS.update((\"Instrument\", \"Facility\", \"PhotSystem\", \"ID\", \"PhotCalID\",\n+ \"FORMAT\", \"VERB\"))\n+\n \n class SvoFpsClass(BaseQuery):\n \"\"\"\n@@ -35,8 +47,8 @@ class SvoFpsClass(BaseQuery):\n query : dict\n Used to create a HTTP query string i.e. send to SVO FPS to get data.\n In dictionary, specify keys as search parameters (str) and\n- values as required. List of search parameters can be found at\n- http://svo2.cab.inta-csic.es/theory/fps/fps.php?FORMAT=metadata\n+ values as required. Description of search parameters can be found at\n+ http://svo2.cab.inta-csic.es/theory/fps/index.php?mode=voservice\n error_msg : str, optional\n Error message to be shown in case no table element found in the\n responded VOTable. Use this to make error message verbose in context\n@@ -49,6 +61,14 @@ class SvoFpsClass(BaseQuery):\n astropy.table.table.Table object\n Table containing data fetched from SVO (in response to query)\n \"\"\"\n+ bad_params = [param for param in query if param not in QUERY_PARAMETERS]\n+ if bad_params:\n+ raise InvalidQueryError(\n+ f\"parameter{'s' if len(bad_params) > 1 else ''} \"\n+ f\"{', '.join(bad_params)} {'are' if len(bad_params) > 1 else 'is'} \"\n+ f\"invalid. For a description of valid query parameters see \"\n+ \"http://svo2.cab.inta-csic.es/theory/fps/index.php?mode=voservice\"\n+ )\n response = self._request(\"GET\", self.SVO_MAIN_URL, params=query,\n timeout=timeout or self.TIMEOUT,\n cache=cache)\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/svo_fps/tests/test_svo_fps.py",
"new_path": "astroquery/svo_fps/tests/test_svo_fps.py",
"diff": "@@ -2,6 +2,7 @@ import pytest\n import os\n from astropy import units as u\n \n+from astroquery.exceptions import InvalidQueryError\n from astroquery.utils.mocks import MockResponse\n from ..core import SvoFps\n \n@@ -66,3 +67,12 @@ def test_get_filter_list(patch_get):\n table = SvoFps.get_filter_list(TEST_FACILITY, TEST_INSTRUMENT)\n # Check if column for Filter ID (named 'filterID') exists in table\n assert 'filterID' in table.colnames\n+\n+\n+def test_invalid_query(patch_get):\n+ msg = r\"^parameter bad_param is invalid\\. For a description of valid query \"\n+ with pytest.raises(InvalidQueryError, match=msg):\n+ SvoFps.data_from_svo(query={\"bad_param\": 0, \"FWHM\": 20})\n+ msg = r\"^parameters invalid_param, bad_param are invalid\\. For a description of \"\n+ with pytest.raises(InvalidQueryError, match=msg):\n+ SvoFps.data_from_svo(query={\"invalid_param\": 0, 'bad_param': -1})\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/svo_fps/tests/test_svo_fps_remote.py",
"new_path": "astroquery/svo_fps/tests/test_svo_fps_remote.py",
"diff": "@@ -1,5 +1,4 @@\n import pytest\n-import astropy.io.votable.exceptions\n from astropy import units as u\n \n from ..core import SvoFps\n@@ -26,11 +25,3 @@ class TestSvoFpsClass:\n table = SvoFps.get_filter_list(test_facility, test_instrument)\n # Check if column for Filter ID (named 'filterID') exists in table\n assert 'filterID' in table.colnames\n-\n- # Test for failing case (a dummy invalid query)\n- def test_IndexError_in_data_from_svo(self):\n- invalid_query = {'Invalid_param': 0}\n- with pytest.raises(astropy.io.votable.exceptions.E09) as exc:\n- SvoFps.data_from_svo(invalid_query)\n-\n- assert 'must have a value attribute' in str(exc)\n"
}
] |
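A short sketch of the new behaviour: an unknown query parameter now fails fast with InvalidQueryError before any request is sent, so the check below needs no network access.

    from astroquery.exceptions import InvalidQueryError
    from astroquery.svo_fps import SvoFps

    try:
        # 'FWHM' is a valid parameter; 'bad_param' is not, so the name check
        # raises locally instead of forwarding the query to the SVO service.
        SvoFps.data_from_svo(query={'bad_param': 0, 'FWHM': 20})
    except InvalidQueryError as exc:
        print(exc)  # parameter bad_param is invalid. For a description of valid ...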
040f67665e7dbe682d07e8e1872cd782c263f951 | astropy/astroquery | 12.09.2022 15:01:41 | BSD 3-Clause New or Revised License | Simplify setting DummyResponse data in TAP+ tests
`DummyResponse` is used in `esa/jwst` and `gaia` tests for mimicking
server responses. Now the response data can be specified by only setting
the parameters that have non-default values. | [
{
"change_type": "MODIFY",
"old_path": "astroquery/esa/jwst/tests/test_jwsttap.py",
"new_path": "astroquery/esa/jwst/tests/test_jwsttap.py",
"diff": "@@ -236,10 +236,7 @@ class TestTap:\n responseLaunchJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseLaunchJob.set_data(method='POST', body=jobData)\n # The query contains decimals: force default response\n connHandler.set_default_response(responseLaunchJob)\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n@@ -373,27 +370,18 @@ class TestTap:\n responseLaunchJob = DummyResponse(303)\n # list of list (httplib implementation for headers in response)\n launchResponseHeaders = [['location', 'http://test:1111/tap/async/' + jobid]]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n connHandler.set_default_response(responseLaunchJob)\n # Phase response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ responsePhase.set_data(method='GET', body=\"COMPLETED\")\n req = \"async/\" + jobid + \"/phase\"\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = \"async/\" + jobid + \"/results/result\"\n connHandler.set_response(req, responseResultsJob)\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n@@ -456,10 +444,7 @@ class TestTap:\n responseLaunchJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseLaunchJob.set_data(method='POST', body=jobData)\n ra = 19.0\n dec = 20.0\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n@@ -546,10 +531,7 @@ class TestTap:\n responseLaunchJob = DummyResponse(303)\n # list of list (httplib implementation for headers in response)\n launchResponseHeaders = [['location', 'http://test:1111/tap/async/' + jobid]]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n ra = 19\n dec = 20\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n@@ -557,20 +539,14 @@ class TestTap:\n connHandler.set_default_response(responseLaunchJob)\n # Phase response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ responsePhase.set_data(method='GET', body=\"COMPLETED\")\n req = \"async/\" + jobid + \"/phase\"\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = \"async/\" + jobid + \"/results/result\"\n connHandler.set_response(req, responseResultsJob)\n job = tap.cone_search(sc, radius, async_job=True)\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/gaia/tests/test_gaiatap.py",
"new_path": "astroquery/gaia/tests/test_gaiatap.py",
"diff": "@@ -48,10 +48,7 @@ class TestTap:\n \n message_text = \"1653401204784D[type: -100,-1]=Gaia dev is under maintenance\"\n \n- dummy_response.set_data(method='GET',\n- context=None,\n- body=message_text,\n- headers=None)\n+ dummy_response.set_data(method='GET', body=message_text)\n connHandler.set_default_response(dummy_response)\n \n # show_server_messages\n@@ -69,10 +66,7 @@ class TestTap:\n \n message_text = \"1653401204784D[type: -100,-1]=Gaia dev is under maintenance\"\n \n- dummy_response.set_data(method='GET',\n- context=None,\n- body=message_text,\n- headers=None)\n+ dummy_response.set_data(method='GET', body=message_text)\n conn_handler.set_default_response(dummy_response)\n \n # show_server_messages\n@@ -86,10 +80,7 @@ class TestTap:\n response_launch_job = DummyResponse(200)\n job_data_file = data_path('job_1.vot')\n job_data = utils.read_file_content(job_data_file)\n- response_launch_job.set_data(method='POST',\n- context=None,\n- body=job_data,\n- headers=None)\n+ response_launch_job.set_data(method='POST', body=job_data)\n # The query contains decimals: force default response\n conn_handler.set_default_response(response_launch_job)\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n@@ -167,27 +158,18 @@ class TestTap:\n launch_response_headers = [\n ['location', 'http://test:1111/tap/async/' + jobid]\n ]\n- response_launch_job.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launch_response_headers)\n+ response_launch_job.set_data(method='POST', headers=launch_response_headers)\n conn_handler.set_default_response(response_launch_job)\n # Phase response\n response_phase = DummyResponse(200)\n- response_phase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ response_phase.set_data(method='GET', body=\"COMPLETED\")\n req = \"async/\" + jobid + \"/phase\"\n conn_handler.set_response(req, response_phase)\n # Results response\n response_results_job = DummyResponse(200)\n job_data_file = data_path('job_1.vot')\n job_data = utils.read_file_content(job_data_file)\n- response_results_job.set_data(method='GET',\n- context=None,\n- body=job_data,\n- headers=None)\n+ response_results_job.set_data(method='GET', body=job_data)\n req = \"async/\" + jobid + \"/results/result\"\n conn_handler.set_response(req, response_results_job)\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n@@ -254,10 +236,7 @@ class TestTap:\n response_launch_job = DummyResponse(200)\n job_data_file = data_path('job_1.vot')\n job_data = utils.read_file_content(job_data_file)\n- response_launch_job.set_data(method='POST',\n- context=None,\n- body=job_data,\n- headers=None)\n+ response_launch_job.set_data(method='POST', body=job_data)\n ra = 19.0\n dec = 20.0\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n@@ -307,10 +286,7 @@ class TestTap:\n launch_response_headers = [\n ['location', 'http://test:1111/tap/async/' + jobid]\n ]\n- response_launch_job.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launch_response_headers)\n+ response_launch_job.set_data(method='POST', headers=launch_response_headers)\n ra = 19\n dec = 20\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n@@ -318,20 +294,14 @@ class TestTap:\n conn_handler.set_default_response(response_launch_job)\n # Phase response\n response_phase = DummyResponse(200)\n- response_phase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ response_phase.set_data(method='GET', 
body=\"COMPLETED\")\n req = \"async/\" + jobid + \"/phase\"\n conn_handler.set_response(req, response_phase)\n # Results response\n response_results_job = DummyResponse(200)\n job_data_file = data_path('job_1.vot')\n job_data = utils.read_file_content(job_data_file)\n- response_results_job.set_data(method='GET',\n- context=None,\n- body=job_data,\n- headers=None)\n+ response_results_job.set_data(method='GET', body=job_data)\n req = \"async/\" + jobid + \"/results/result\"\n conn_handler.set_response(req, response_results_job)\n job = tap.cone_search_async(sc, radius)\n@@ -459,27 +429,18 @@ class TestTap:\n launch_response_headers = [\n ['location', 'http://test:1111/tap/async/' + jobid]\n ]\n- response_launch_job.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launch_response_headers)\n+ response_launch_job.set_data(method='POST', headers=launch_response_headers)\n conn_handler.set_default_response(response_launch_job)\n # Phase response\n response_phase = DummyResponse(200)\n- response_phase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ response_phase.set_data(method='GET', body=\"COMPLETED\")\n req = \"async/\" + jobid + \"/phase\"\n conn_handler.set_response(req, response_phase)\n # Results response\n response_results_job = DummyResponse(200)\n job_data_file = data_path('job_1.vot')\n job_data = utils.read_file_content(job_data_file)\n- response_results_job.set_data(method='GET',\n- context=None,\n- body=job_data,\n- headers=None)\n+ response_results_job.set_data(method='GET', body=job_data)\n req = \"async/\" + jobid + \"/results/result\"\n conn_handler.set_response(req, response_results_job)\n query = (\"SELECT crossmatch_positional(\",\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/conn/tests/DummyConn.py",
"new_path": "astroquery/utils/tap/conn/tests/DummyConn.py",
"diff": "@@ -29,7 +29,7 @@ class DummyConn:\n self.cookie = None\n self.ishttps = False\n \n- def request(self, method, context, body, headers):\n+ def request(self, method, context=None, body=None, headers=None):\n self.response.set_data(method, context, body, headers)\n \n def getresponse(self):\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/conn/tests/DummyResponse.py",
"new_path": "astroquery/utils/tap/conn/tests/DummyResponse.py",
"diff": "@@ -33,7 +33,7 @@ class DummyResponse:\n self.status = status_code\n self.reason = self.STATUS_MESSAGES.get(status_code)\n \n- def set_data(self, method, context, body, headers):\n+ def set_data(self, method, context=None, body=None, headers=None):\n self.method = method\n self.context = context\n self.body = body\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/model/tests/test_job.py",
"new_path": "astroquery/utils/tap/model/tests/test_job.py",
"diff": "@@ -42,10 +42,7 @@ def test_job_get_results(capsys, tmpdir):\n job.jobid = jobid\n job.parameters['format'] = outputFormat\n responseCheckPhase = DummyResponse(500)\n- responseCheckPhase.set_data(method='GET',\n- context=None,\n- body='FINISHED',\n- headers=None)\n+ responseCheckPhase.set_data(method='GET', body='FINISHED')\n waitRequest = f\"async/{jobid}/phase\"\n connHandler = DummyConnHandler()\n connHandler.set_response(waitRequest, responseCheckPhase)\n@@ -58,10 +55,7 @@ def test_job_get_results(capsys, tmpdir):\n responseGetData = DummyResponse(500)\n jobContentFileName = data_path('result_1.vot')\n jobContent = utils.read_file_content(jobContentFileName)\n- responseGetData.set_data(method='GET',\n- context=None,\n- body=jobContent,\n- headers=None)\n+ responseGetData.set_data(method='GET', body=jobContent)\n dataRequest = f\"async/{jobid}/results/result\"\n connHandler.set_response(dataRequest, responseGetData)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/tests/test_tap.py",
"new_path": "astroquery/utils/tap/tests/test_tap.py",
"diff": "@@ -39,10 +39,7 @@ def test_load_tables():\n responseLoadTable = DummyResponse(500)\n tableDataFile = data_path('test_tables.xml')\n tableData = utils.read_file_content(tableDataFile)\n- responseLoadTable.set_data(method='GET',\n- context=None,\n- body=tableData,\n- headers=None)\n+ responseLoadTable.set_data(method='GET', body=tableData)\n tableRequest = \"tables\"\n connHandler.set_response(tableRequest, responseLoadTable)\n with pytest.raises(Exception):\n@@ -81,10 +78,7 @@ def test_load_tables_parameters():\n responseLoadTable = DummyResponse(200)\n tableDataFile = data_path('test_tables.xml')\n tableData = utils.read_file_content(tableDataFile)\n- responseLoadTable.set_data(method='GET',\n- context=None,\n- body=tableData,\n- headers=None)\n+ responseLoadTable.set_data(method='GET', body=tableData)\n tableRequest = \"tables\"\n connHandler.set_response(tableRequest, responseLoadTable)\n \n@@ -132,10 +126,7 @@ def test_load_table():\n responseLoadTable = DummyResponse(500)\n tableDataFile = data_path('test_table1.xml')\n tableData = utils.read_file_content(tableDataFile)\n- responseLoadTable.set_data(method='GET',\n- context=None,\n- body=tableData,\n- headers=None)\n+ responseLoadTable.set_data(method='GET', body=tableData)\n tableSchema = \"public\"\n tableName = \"table1\"\n fullQualifiedTableName = f\"{tableSchema}.{tableName}\"\n@@ -163,10 +154,7 @@ def test_launch_sync_job():\n responseLaunchJob = DummyResponse(500)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseLaunchJob.set_data(method='POST', body=jobData)\n query = 'select top 5 * from table'\n dTmp = {\"q\": query}\n dTmpEncoded = connHandler.url_encode(dTmp)\n@@ -229,10 +217,7 @@ def test_launch_sync_job_redirect():\n launchResponseHeaders = [\n ['location', resultsLocation]\n ]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=None)\n+ responseLaunchJob.set_data(method='POST')\n query = 'select top 5 * from table'\n dTmp = {\"q\": query}\n dTmpEncoded = connHandler.url_encode(dTmp)\n@@ -252,10 +237,7 @@ def test_launch_sync_job_redirect():\n responseResultsJob = DummyResponse(500)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n connHandler.set_response(resultsReq, responseResultsJob)\n \n with pytest.raises(Exception):\n@@ -271,10 +253,7 @@ def test_launch_sync_job_redirect():\n # Location available\n # Results raises error (500)\n responseResultsJob.set_status_code(200)\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n responseResultsJob.set_status_code(500)\n with pytest.raises(Exception):\n tap.launch_job(query)\n@@ -324,10 +303,7 @@ def test_launch_async_job():\n launchResponseHeaders = [\n ['location', f'http://test:1111/tap/async/{jobid}']\n ]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n query = 'query'\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n@@ -341,20 +317,14 @@ def test_launch_async_job():\n connHandler.set_response(req, responseLaunchJob)\n # Phase 
response\n responsePhase = DummyResponse(500)\n- responsePhase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ responsePhase.set_data(method='GET', body=\"COMPLETED\")\n req = f\"async/{jobid}/phase\"\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(500)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = f\"async/{jobid}/results/result\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -407,10 +377,7 @@ def test_start_job():\n jobid = '12345'\n # Phase POST response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=None)\n+ responsePhase.set_data(method='POST')\n req = f\"async/{jobid}/phase?PHASE=RUN\"\n connHandler.set_response(req, responsePhase)\n # Launch response\n@@ -419,10 +386,7 @@ def test_start_job():\n launchResponseHeaders = [\n ['location', f'http://test:1111/tap/async/{jobid}']\n ]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n query = 'query'\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n@@ -435,20 +399,14 @@ def test_start_job():\n connHandler.set_response(req, responseLaunchJob)\n # Phase response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='GET',\n- context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ responsePhase.set_data(method='GET', body=\"COMPLETED\")\n req = f\"async/{jobid}/phase\"\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = f\"async/{jobid}/results/result\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -476,10 +434,7 @@ def test_abort_job():\n jobid = '12345'\n # Phase POST response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=None)\n+ responsePhase.set_data(method='POST')\n req = f\"async/{jobid}/phase?PHASE=ABORT\"\n connHandler.set_response(req, responsePhase)\n # Launch response\n@@ -488,10 +443,7 @@ def test_abort_job():\n launchResponseHeaders = [\n ['location', f'http://test:1111/tap/async/{jobid}']\n ]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n query = 'query'\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n@@ -524,10 +476,7 @@ def test_job_parameters():\n launchResponseHeaders = [\n ['location', f'http://test:1111/tap/async/{jobid}']\n ]\n- responseLaunchJob.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=launchResponseHeaders)\n+ responseLaunchJob.set_data(method='POST', headers=launchResponseHeaders)\n query = 'query'\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n@@ -540,20 +489,14 @@ def test_job_parameters():\n connHandler.set_response(req, responseLaunchJob)\n # Phase response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='GET',\n- 
context=None,\n- body=\"COMPLETED\",\n- headers=None)\n+ responsePhase.set_data(method='GET', body=\"COMPLETED\")\n req = f\"async/{jobid}/phase\"\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = f\"async/{jobid}/results/result\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -564,18 +507,12 @@ def test_job_parameters():\n \n # parameter response\n responseParameters = DummyResponse(200)\n- responseParameters.set_data(method='GET',\n- context=None,\n- body=None,\n- headers=None)\n+ responseParameters.set_data(method='GET')\n req = f\"async/{jobid}?param1=value1\"\n connHandler.set_response(req, responseParameters)\n # Phase POST response\n responsePhase = DummyResponse(200)\n- responsePhase.set_data(method='POST',\n- context=None,\n- body=None,\n- headers=None)\n+ responsePhase.set_data(method='POST')\n req = f\"async/{jobid}/phase?PHASE=RUN\"\n connHandler.set_response(req, responsePhase)\n \n@@ -595,10 +532,7 @@ def test_list_async_jobs():\n response = DummyResponse(500)\n jobDataFile = data_path('jobs_list.xml')\n jobData = utils.read_file_content(jobDataFile)\n- response.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ response.set_data(method='GET', body=jobData)\n req = \"async\"\n connHandler.set_response(req, response)\n with pytest.raises(Exception):\n@@ -621,10 +555,7 @@ def test_data():\n responseResultsJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = \"?ID=1%2C2&format=votable\"\n connHandler.set_response(req, responseResultsJob)\n req = \"?ID=1%2C2\"\n@@ -660,10 +591,7 @@ def test_datalink():\n responseResultsJob = DummyResponse(200)\n jobDataFile = data_path('job_1.vot')\n jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET',\n- context=None,\n- body=jobData,\n- headers=None)\n+ responseResultsJob.set_data(method='GET', body=jobData)\n req = \"links?ID=1,2\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -828,10 +756,7 @@ def test_update_user_table():\n dummyResponse = DummyResponse(200)\n tableDataFile = data_path('test_table_update.xml')\n tableData = utils.read_file_content(tableDataFile)\n- dummyResponse.set_data(method='GET',\n- context=None,\n- body=tableData,\n- headers=None)\n+ dummyResponse.set_data(method='GET', body=tableData)\n tableRequest = f\"tables?tables={tableName}\"\n connHandler.set_response(tableRequest, dummyResponse)\n \n@@ -899,10 +824,7 @@ def test_rename_table():\n dummyResponse = DummyResponse(200)\n tableDataFile = data_path('test_table_rename.xml')\n tableData = utils.read_file_content(tableDataFile)\n- dummyResponse.set_data(method='GET',\n- context=None,\n- body=tableData,\n- headers=None)\n+ dummyResponse.set_data(method='GET', body=tableData)\n \n with pytest.raises(Exception):\n tap.rename_table()\n"
}
] |
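The diffs in the record above all apply the same simplification: `set_data()` gains keyword defaults for `context`, `body`, and `headers`, so test call sites only pass the arguments they actually use. Below is a minimal standalone sketch of that pattern, using a simplified stand-in class rather than the real astroquery `DummyResponse`:

```python
# Simplified stand-in, not the astroquery DummyResponse class; it only shows
# how keyword defaults shrink the call sites seen in the diffs above.
class DummyResponse:
    def __init__(self, status_code):
        self.status = status_code

    def set_data(self, method, context=None, body=None, headers=None):
        # Callers now pass only the arguments that matter for a given test.
        self.method = method
        self.context = context
        self.body = body
        self.headers = headers


response = DummyResponse(200)
# Previously every call spelled out context=None, body=..., headers=None;
# with defaults the same setup reduces to the relevant keywords only.
response.set_data(method="POST", body="COMPLETED")
assert response.context is None and response.headers is None
```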
a2215fa0604f86cb1a4f006c5cb706375aa0d511 | astropy/astroquery | 10.10.2022 23:40:08 | BSD 3-Clause New or Revised License | Simplify column attribute checking in `gaia` tests
The function `__check_results_column()` tested whether the attributes (e.g.
description) of the query result columns matched the expected values. It has
been replaced with `astropy.table.BaseColumn.attrs_equal()`, and the
comparison data is now provided by a `pytest` fixture (a minimal sketch of
the pattern follows this record). | [
{
"change_type": "MODIFY",
"old_path": "astroquery/gaia/tests/test_gaiatap.py",
"new_path": "astroquery/gaia/tests/test_gaiatap.py",
"diff": "@@ -18,6 +18,7 @@ import os\n from unittest.mock import patch\n \n import pytest\n+from astropy.table import Column\n from requests import HTTPError\n \n from astroquery.gaia import conf\n@@ -39,6 +40,19 @@ def data_path(filename):\n return os.path.join(data_dir, filename)\n \n \n+@pytest.fixture(scope=\"module\")\n+def column_attrs():\n+ dtypes = {\n+ \"alpha\": np.float64,\n+ \"delta\": np.float64,\n+ \"source_id\": object,\n+ \"table1_oid\": np.int32\n+ }\n+ columns = {k: Column(name=k, description=k, dtype=v) for k, v in dtypes.items()}\n+ columns[\"source_id\"].meta = {\"_votable_string_dtype\": \"char\"}\n+ return columns\n+\n+\n class TestTap:\n \n def test_show_message(self):\n@@ -58,7 +72,7 @@ class TestTap:\n tapplus = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n GaiaClass(connHandler, tapplus, show_server_messages=True)\n \n- def test_query_object(self):\n+ def test_query_object(self, column_attrs):\n conn_handler = DummyConnHandler()\n tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n@@ -87,54 +101,18 @@ class TestTap:\n assert len(table) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(table))\n- self.__check_results_column(table,\n- 'alpha',\n- 'alpha',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'delta',\n- 'delta',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'source_id',\n- 'source_id',\n- None,\n- object)\n- self.__check_results_column(table,\n- 'table1_oid',\n- 'table1_oid',\n- None,\n- np.int32)\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n # by radius\n radius = Quantity(1, u.deg)\n table = tap.query_object(sc, radius=radius)\n assert len(table) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(table))\n- self.__check_results_column(table,\n- 'alpha',\n- 'alpha',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'delta',\n- 'delta',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'source_id',\n- 'source_id',\n- None,\n- object)\n- self.__check_results_column(table,\n- 'table1_oid',\n- 'table1_oid',\n- None,\n- np.int32)\n-\n- def test_query_object_async(self):\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n+\n+ def test_query_object_async(self, column_attrs):\n conn_handler = DummyConnHandler()\n tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n@@ -167,54 +145,18 @@ class TestTap:\n assert len(table) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(table))\n- self.__check_results_column(table,\n- 'alpha',\n- 'alpha',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'delta',\n- 'delta',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'source_id',\n- 'source_id',\n- None,\n- object)\n- self.__check_results_column(table,\n- 'table1_oid',\n- 'table1_oid',\n- None,\n- np.int32)\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n # by radius\n radius = Quantity(1, u.deg)\n table = tap.query_object_async(sc, radius=radius)\n assert len(table) == 3, \\\n \"Wrong job results (num rows). 
Expected: %d, found %d\" % \\\n (3, len(table))\n- self.__check_results_column(table,\n- 'alpha',\n- 'alpha',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'delta',\n- 'delta',\n- None,\n- np.float64)\n- self.__check_results_column(table,\n- 'source_id',\n- 'source_id',\n- None,\n- object)\n- self.__check_results_column(table,\n- 'table1_oid',\n- 'table1_oid',\n- None,\n- np.int32)\n-\n- def test_cone_search_sync(self):\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n+\n+ def test_cone_search_sync(self, column_attrs):\n conn_handler = DummyConnHandler()\n tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n@@ -241,28 +183,10 @@ class TestTap:\n assert len(results) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(results))\n- self.__check_results_column(results,\n- 'alpha',\n- 'alpha',\n- None,\n- np.float64)\n- self.__check_results_column(results,\n- 'delta',\n- 'delta',\n- None,\n- np.float64)\n- self.__check_results_column(results,\n- 'source_id',\n- 'source_id',\n- None,\n- object)\n- self.__check_results_column(results,\n- 'table1_oid',\n- 'table1_oid',\n- None,\n- np.int32)\n-\n- def test_cone_search_async(self):\n+ for colname, attrs in column_attrs.items():\n+ assert results[colname].attrs_equal(attrs)\n+\n+ def test_cone_search_async(self, column_attrs):\n conn_handler = DummyConnHandler()\n tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n@@ -303,26 +227,8 @@ class TestTap:\n assert len(results) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(results))\n- self.__check_results_column(results,\n- 'alpha',\n- 'alpha',\n- None,\n- np.float64)\n- self.__check_results_column(results,\n- 'delta',\n- 'delta',\n- None,\n- np.float64)\n- self.__check_results_column(results,\n- 'source_id',\n- 'source_id',\n- None,\n- object)\n- self.__check_results_column(results,\n- 'table1_oid',\n- 'table1_oid',\n- None,\n- np.int32)\n+ for colname, attrs in column_attrs.items():\n+ assert results[colname].attrs_equal(attrs)\n \n # Regression test for #2093 and #2099 - changing the MAIN_GAIA_TABLE\n # had no effect.\n@@ -339,22 +245,6 @@ class TestTap:\n # Cleanup.\n conf.reset('MAIN_GAIA_TABLE')\n \n- def __check_results_column(self, results, column_name, description, unit,\n- data_type):\n- c = results[column_name]\n- assert c.description == description, \\\n- \"Wrong description for results column '%s'. \" % \\\n- \"Expected: '%s', found '%s'\" % \\\n- (column_name, description, c.description)\n- assert c.unit == unit, \\\n- \"Wrong unit for results column '%s'. \" % \\\n- \"Expected: '%s', found '%s'\" % \\\n- (column_name, unit, c.unit)\n- assert c.dtype == data_type, \\\n- \"Wrong dataType for results column '%s'. \" % \\\n- \"Expected: '%s', found '%s'\" % \\\n- (column_name, data_type, c.dtype)\n-\n def test_load_data(self):\n dummy_handler = DummyTapHandler()\n tap = GaiaClass(dummy_handler, dummy_handler, show_server_messages=False)\n"
}
] |
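A self-contained sketch of the `attrs_equal()`-plus-fixture pattern described in the record above; the table contents and column set here are invented for illustration and are not the astroquery test data:

```python
import numpy as np
import pytest
from astropy.table import Column, Table


@pytest.fixture(scope="module")
def column_attrs():
    # Expected attributes (name, description, dtype) for each result column.
    dtypes = {"alpha": np.float64, "delta": np.float64}
    return {k: Column(name=k, description=k, dtype=v) for k, v in dtypes.items()}


def test_columns_match_expectation(column_attrs):
    results = Table({
        "alpha": Column([1.0, 2.0], description="alpha"),
        "delta": Column([3.0, 4.0], description="delta"),
    })
    # attrs_equal() compares name, unit, dtype, format, description and meta
    # in one call, replacing the hand-written per-attribute assertions.
    for colname, attrs in column_attrs.items():
        assert results[colname].attrs_equal(attrs)
```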
897aed03b17583152118d44f7a3b78f22a8a08b6 | astropy/astroquery | 12.10.2022 16:57:42 | BSD 3-Clause New or Revised License | Reduce duplication in setting up `gaia` tests
The new module-level fixtures made it possible to remove duplicated setup
code from many tests (a generic sketch of the pattern follows this
record). | [
{
"change_type": "MODIFY",
"old_path": "astroquery/gaia/tests/test_gaiatap.py",
"new_path": "astroquery/gaia/tests/test_gaiatap.py",
"diff": "@@ -15,6 +15,7 @@ Created on 30 jun. 2016\n \n \"\"\"\n import os\n+from pathlib import Path\n from unittest.mock import patch\n \n import pytest\n@@ -35,9 +36,7 @@ from astroquery.utils.tap.core import TapPlus, TAP_CLIENT_ID\n from astroquery.utils.tap import taputils\n \n \n-def data_path(filename):\n- data_dir = os.path.join(os.path.dirname(__file__), 'data')\n- return os.path.join(data_dir, filename)\n+job_data = utils.read_file_content(Path(__file__).parent.joinpath(\"data\", \"job_1.vot\"))\n \n \n @pytest.fixture(scope=\"module\")\n@@ -53,6 +52,53 @@ def column_attrs():\n return columns\n \n \n+@pytest.fixture(scope=\"module\")\n+def mock_querier():\n+ conn_handler = DummyConnHandler()\n+ tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n+ launch_response = DummyResponse(200)\n+ launch_response.set_data(method=\"POST\", body=job_data)\n+ # The query contains decimals: default response is more robust.\n+ conn_handler.set_default_response(launch_response)\n+ return GaiaClass(conn_handler, tapplus, show_server_messages=False)\n+\n+\n+@pytest.fixture(scope=\"module\")\n+def mock_querier_async():\n+ conn_handler = DummyConnHandler()\n+ tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n+ jobid = \"12345\"\n+\n+ launch_response = DummyResponse(303)\n+ launch_response_headers = [[\"location\", \"http://test:1111/tap/async/\" + jobid]]\n+ launch_response.set_data(method=\"POST\", headers=launch_response_headers)\n+ conn_handler.set_default_response(launch_response)\n+\n+ phase_response = DummyResponse(200)\n+ phase_response.set_data(method=\"GET\", body=\"COMPLETED\")\n+ conn_handler.set_response(\"async/\" + jobid + \"/phase\", phase_response)\n+\n+ results_response = DummyResponse(200)\n+ results_response.set_data(method=\"GET\", body=job_data)\n+ conn_handler.set_response(\"async/\" + jobid + \"/results/result\", results_response)\n+\n+ dict_tmp = {\n+ \"REQUEST\": \"doQuery\",\n+ \"LANG\": \"ADQL\",\n+ \"FORMAT\": \"votable\",\n+ \"tapclient\": TAP_CLIENT_ID,\n+ \"PHASE\": \"RUN\",\n+ \"QUERY\": (\n+ \"SELECT crossmatch_positional('schemaA','tableA','schemaB','tableB',1.0,\"\n+ \"'results')FROM dual;\"\n+ )\n+ }\n+ sorted_key = taputils.taputil_create_sorted_dict_key(dict_tmp)\n+ conn_handler.set_response(\"sync?\" + sorted_key, launch_response)\n+\n+ return GaiaClass(conn_handler, tapplus, show_server_messages=False)\n+\n+\n class TestTap:\n \n def test_show_message(self):\n@@ -72,32 +118,21 @@ class TestTap:\n tapplus = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n GaiaClass(connHandler, tapplus, show_server_messages=True)\n \n- def test_query_object(self, column_attrs):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- # Launch response: we use default response because the query contains\n- # decimals\n- response_launch_job = DummyResponse(200)\n- job_data_file = data_path('job_1.vot')\n- job_data = utils.read_file_content(job_data_file)\n- response_launch_job.set_data(method='POST', body=job_data)\n- # The query contains decimals: force default response\n- conn_handler.set_default_response(response_launch_job)\n+ def test_query_object(self, column_attrs, mock_querier):\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n frame='icrs')\n with pytest.raises(ValueError) as err:\n- tap.query_object(sc)\n+ mock_querier.query_object(sc)\n assert \"Missing required argument: width\" 
in err.value.args[0]\n \n width = Quantity(12, u.deg)\n \n with pytest.raises(ValueError) as err:\n- tap.query_object(sc, width=width)\n+ mock_querier.query_object(sc, width=width)\n assert \"Missing required argument: height\" in err.value.args[0]\n \n height = Quantity(10, u.deg)\n- table = tap.query_object(sc, width=width, height=height)\n+ table = mock_querier.query_object(sc, width=width, height=height)\n assert len(table) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(table))\n@@ -105,43 +140,19 @@ class TestTap:\n assert table[colname].attrs_equal(attrs)\n # by radius\n radius = Quantity(1, u.deg)\n- table = tap.query_object(sc, radius=radius)\n+ table = mock_querier.query_object(sc, radius=radius)\n assert len(table) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(table))\n for colname, attrs in column_attrs.items():\n assert table[colname].attrs_equal(attrs)\n \n- def test_query_object_async(self, column_attrs):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- jobid = '12345'\n- # Launch response\n- response_launch_job = DummyResponse(303)\n- # list of list (httplib implementation for headers in response)\n- launch_response_headers = [\n- ['location', 'http://test:1111/tap/async/' + jobid]\n- ]\n- response_launch_job.set_data(method='POST', headers=launch_response_headers)\n- conn_handler.set_default_response(response_launch_job)\n- # Phase response\n- response_phase = DummyResponse(200)\n- response_phase.set_data(method='GET', body=\"COMPLETED\")\n- req = \"async/\" + jobid + \"/phase\"\n- conn_handler.set_response(req, response_phase)\n- # Results response\n- response_results_job = DummyResponse(200)\n- job_data_file = data_path('job_1.vot')\n- job_data = utils.read_file_content(job_data_file)\n- response_results_job.set_data(method='GET', body=job_data)\n- req = \"async/\" + jobid + \"/results/result\"\n- conn_handler.set_response(req, response_results_job)\n+ def test_query_object_async(self, column_attrs, mock_querier_async):\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n frame='icrs')\n width = Quantity(12, u.deg)\n height = Quantity(10, u.deg)\n- table = tap.query_object_async(sc, width=width, height=height)\n+ table = mock_querier_async.query_object_async(sc, width=width, height=height)\n assert len(table) == 3, \\\n \"Wrong job results (num rows). Expected: %d, found %d\" % \\\n (3, len(table))\n@@ -149,29 +160,19 @@ class TestTap:\n assert table[colname].attrs_equal(attrs)\n # by radius\n radius = Quantity(1, u.deg)\n- table = tap.query_object_async(sc, radius=radius)\n+ table = mock_querier_async.query_object_async(sc, radius=radius)\n assert len(table) == 3, \\\n \"Wrong job results (num rows). 
Expected: %d, found %d\" % \\\n (3, len(table))\n for colname, attrs in column_attrs.items():\n assert table[colname].attrs_equal(attrs)\n \n- def test_cone_search_sync(self, column_attrs):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- # Launch response: we use default response because the query contains\n- # decimals\n- response_launch_job = DummyResponse(200)\n- job_data_file = data_path('job_1.vot')\n- job_data = utils.read_file_content(job_data_file)\n- response_launch_job.set_data(method='POST', body=job_data)\n+ def test_cone_search_sync(self, column_attrs, mock_querier):\n ra = 19.0\n dec = 20.0\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n radius = Quantity(1.0, u.deg)\n- conn_handler.set_default_response(response_launch_job)\n- job = tap.cone_search(sc, radius)\n+ job = mock_querier.cone_search(sc, radius)\n assert job is not None, \"Expected a valid job\"\n assert job.async_ is False, \"Expected a synchronous job\"\n assert job.get_phase() == 'COMPLETED', \\\n@@ -186,36 +187,12 @@ class TestTap:\n for colname, attrs in column_attrs.items():\n assert results[colname].attrs_equal(attrs)\n \n- def test_cone_search_async(self, column_attrs):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- jobid = '12345'\n- # Launch response\n- response_launch_job = DummyResponse(303)\n- # list of list (httplib implementation for headers in response)\n- launch_response_headers = [\n- ['location', 'http://test:1111/tap/async/' + jobid]\n- ]\n- response_launch_job.set_data(method='POST', headers=launch_response_headers)\n+ def test_cone_search_async(self, column_attrs, mock_querier_async):\n ra = 19\n dec = 20\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n radius = Quantity(1.0, u.deg)\n- conn_handler.set_default_response(response_launch_job)\n- # Phase response\n- response_phase = DummyResponse(200)\n- response_phase.set_data(method='GET', body=\"COMPLETED\")\n- req = \"async/\" + jobid + \"/phase\"\n- conn_handler.set_response(req, response_phase)\n- # Results response\n- response_results_job = DummyResponse(200)\n- job_data_file = data_path('job_1.vot')\n- job_data = utils.read_file_content(job_data_file)\n- response_results_job.set_data(method='GET', body=job_data)\n- req = \"async/\" + jobid + \"/results/result\"\n- conn_handler.set_response(req, response_results_job)\n- job = tap.cone_search_async(sc, radius)\n+ job = mock_querier_async.cone_search_async(sc, radius)\n assert job is not None, \"Expected a valid job\"\n assert job.async_ is True, \"Expected an asynchronous job\"\n assert job.get_phase() == 'COMPLETED', \\\n@@ -236,11 +213,11 @@ class TestTap:\n assert 'gaiadr2.gaia_source' in job.parameters['query']\n # Test changing the table name through conf.\n conf.MAIN_GAIA_TABLE = 'name_from_conf'\n- job = tap.cone_search_async(sc, radius)\n+ job = mock_querier_async.cone_search_async(sc, radius)\n assert 'name_from_conf' in job.parameters['query']\n # Changing the value through the class should overrule conf.\n- tap.MAIN_GAIA_TABLE = 'name_from_class'\n- job = tap.cone_search_async(sc, radius)\n+ mock_querier_async.MAIN_GAIA_TABLE = 'name_from_class'\n+ job = mock_querier_async.cone_search_async(sc, radius)\n assert 'name_from_class' in job.parameters['query']\n # 
Cleanup.\n conf.reset('MAIN_GAIA_TABLE')\n@@ -295,100 +272,77 @@ class TestTap:\n tap.get_datalinks(ids, verbose)\n dummy_handler.check_call('get_datalinks', parameters)\n \n- def test_xmatch(self):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- jobid = '12345'\n- # Launch response\n- response_launch_job = DummyResponse(303)\n- # list of list (httplib implementation for headers in response)\n- launch_response_headers = [\n- ['location', 'http://test:1111/tap/async/' + jobid]\n- ]\n- response_launch_job.set_data(method='POST', headers=launch_response_headers)\n- conn_handler.set_default_response(response_launch_job)\n- # Phase response\n- response_phase = DummyResponse(200)\n- response_phase.set_data(method='GET', body=\"COMPLETED\")\n- req = \"async/\" + jobid + \"/phase\"\n- conn_handler.set_response(req, response_phase)\n- # Results response\n- response_results_job = DummyResponse(200)\n- job_data_file = data_path('job_1.vot')\n- job_data = utils.read_file_content(job_data_file)\n- response_results_job.set_data(method='GET', body=job_data)\n- req = \"async/\" + jobid + \"/results/result\"\n- conn_handler.set_response(req, response_results_job)\n- query = (\"SELECT crossmatch_positional(\",\n- \"'schemaA','tableA','schemaB','tableB',1.0,'results')\",\n- \"FROM dual;\")\n- d_tmp = {\"q\": query}\n- d_tmp_encoded = conn_handler.url_encode(d_tmp)\n- p = d_tmp_encoded.find(\"=\")\n- q = d_tmp_encoded[p + 1:]\n- dict_tmp = {\n- \"REQUEST\": \"doQuery\",\n- \"LANG\": \"ADQL\",\n- \"FORMAT\": \"votable\",\n- \"tapclient\": str(TAP_CLIENT_ID),\n- \"PHASE\": \"RUN\",\n- \"QUERY\": str(q)}\n- sorted_key = taputils.taputil_create_sorted_dict_key(dict_tmp)\n- job_request = \"sync?\" + sorted_key\n- conn_handler.set_response(job_request, response_launch_job)\n+ def test_xmatch(self, mock_querier_async):\n # check parameters\n # missing table A\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a=None,\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results')\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a=None,\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ )\n assert \"Table name A argument is mandatory\" in err.value.args[0]\n # missing schema A\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results')\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ )\n assert \"Not found schema name in full qualified table A: 'tableA'\" \\\n in err.value.args[0]\n # missing table B\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b=None,\n- results_table_name='results')\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b=None,\n+ results_table_name='results',\n+ )\n assert \"Table name B argument is mandatory\" in err.value.args[0]\n # missing schema B\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='tableB',\n- results_table_name='results')\n+ mock_querier_async.cross_match(\n+ 
full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='tableB',\n+ results_table_name='results',\n+ )\n assert \"Not found schema name in full qualified table B: 'tableB'\" \\\n in err.value.args[0]\n # missing results table\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name=None)\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name=None,\n+ )\n assert \"Results table name argument is mandatory\" in err.value.args[0]\n # wrong results table (with schema)\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='schema.results')\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='schema.results',\n+ )\n assert \"Please, do not specify schema for 'results_table_name'\" \\\n in err.value.args[0]\n # radius < 0.1\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results', radius=0.01)\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ radius=0.01,\n+ )\n assert \"Invalid radius value. Found 0.01, valid range is: 0.1 to 10.0\" \\\n in err.value.args[0]\n # radius > 10.0\n with pytest.raises(ValueError) as err:\n- tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results', radius=10.1)\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ radius=10.1\n+ )\n assert \"Invalid radius value. Found 10.1, valid range is: 0.1 to 10.0\" \\\n in err.value.args[0]\n # check default parameters\n@@ -408,19 +362,22 @@ class TestTap:\n parameters['background'] = False\n parameters['upload_resource'] = None\n parameters['upload_table_name'] = None\n- job = tap.cross_match(full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results')\n+ job = mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ )\n assert job.async_ is True, \"Expected an asynchronous job\"\n assert job.get_phase() == 'COMPLETED', \\\n \"Wrong job phase. Expected: %s, found %s\" % \\\n ('COMPLETED', job.get_phase())\n assert job.failed is False, \"Wrong job status (set Failed = True)\"\n- job = tap.cross_match(\n+ job = mock_querier_async.cross_match(\n full_qualified_table_name_a='schemaA.tableA',\n full_qualified_table_name_b='schemaB.tableB',\n results_table_name='results',\n- background=True)\n+ background=True,\n+ )\n assert job.async_ is True, \"Expected an asynchronous job\"\n assert job.get_phase() == 'EXECUTING', \\\n \"Wrong job phase. Expected: %s, found %s\" % \\\n"
}
] |
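The record above moves repeated connection/response setup into module-scoped `pytest` fixtures (`mock_querier`, `mock_querier_async`). A generic sketch of the same idea, assuming invented `FakeServer`/`Client` stand-ins rather than the astroquery classes:

```python
# FakeServer and Client are invented stand-ins used only to show the shape of
# the refactoring; they are not astroquery classes.
import pytest


class FakeServer:
    def __init__(self):
        self.responses = {}

    def set_response(self, request, body):
        self.responses[request] = body


class Client:
    def __init__(self, server):
        self.server = server

    def query(self, request):
        return self.server.responses.get(request)


@pytest.fixture(scope="module")
def mock_client():
    # Built once per test module; individual tests no longer repeat this setup.
    server = FakeServer()
    server.set_response("async/12345/phase", "COMPLETED")
    return Client(server)


def test_phase_is_completed(mock_client):
    assert mock_client.query("async/12345/phase") == "COMPLETED"


def test_unknown_request_returns_nothing(mock_client):
    assert mock_client.query("async/12345/results") is None
```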
f1805854fe98984268a8693e0e970b37b14cb50e | astropy/astroquery | 12.10.2022 16:58:48 | BSD 3-Clause New or Revised License | Remove `TestTap` class from `gaia` tests
Grouping the tests together in a test class served no purpose, and
eliminating the class removed a level of indentation. | [
{
"change_type": "MODIFY",
"old_path": "astroquery/gaia/tests/test_gaiatap.py",
"new_path": "astroquery/gaia/tests/test_gaiatap.py",
"diff": "@@ -100,251 +100,249 @@ def mock_querier_async():\n return GaiaClass(conn_handler, tapplus, show_server_messages=False)\n \n \n-class TestTap:\n-\n- def test_show_message(self):\n- connHandler = DummyConnHandler()\n-\n- dummy_response = DummyResponse(200)\n-\n- message_text = \"1653401204784D[type: -100,-1]=Gaia dev is under maintenance\"\n-\n- dummy_response.set_data(method='GET', body=message_text)\n- connHandler.set_default_response(dummy_response)\n-\n- # show_server_messages\n- tableRequest = 'notification?action=GetNotifications'\n- connHandler.set_response(tableRequest, dummy_response)\n-\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n- GaiaClass(connHandler, tapplus, show_server_messages=True)\n-\n- def test_query_object(self, column_attrs, mock_querier):\n- with pytest.raises(ValueError) as err:\n- mock_querier.query_object(skycoord)\n- assert \"Missing required argument: width\" in err.value.args[0]\n-\n- width = 12 * u.deg\n-\n- with pytest.raises(ValueError) as err:\n- mock_querier.query_object(skycoord, width=width)\n- assert \"Missing required argument: height\" in err.value.args[0]\n-\n- table = mock_querier.query_object(skycoord, width=width, height=10 * u.deg)\n- assert len(table) == 3\n- for colname, attrs in column_attrs.items():\n- assert table[colname].attrs_equal(attrs)\n- # by radius\n- table = mock_querier.query_object(skycoord, radius=1 * u.deg)\n- assert len(table) == 3\n- for colname, attrs in column_attrs.items():\n- assert table[colname].attrs_equal(attrs)\n-\n- def test_query_object_async(self, column_attrs, mock_querier_async):\n- table = mock_querier_async.query_object_async(\n- skycoord, width=12 * u.deg, height=10 * u.deg\n- )\n- assert len(table) == 3\n- for colname, attrs in column_attrs.items():\n- assert table[colname].attrs_equal(attrs)\n- # by radius\n- table = mock_querier_async.query_object_async(skycoord, radius=1 * u.deg)\n- assert len(table) == 3\n- for colname, attrs in column_attrs.items():\n- assert table[colname].attrs_equal(attrs)\n-\n- def test_cone_search_sync(self, column_attrs, mock_querier):\n- job = mock_querier.cone_search(skycoord, 1 * u.deg)\n- assert job.async_ is False\n- assert job.get_phase() == \"COMPLETED\"\n- assert job.failed is False\n- # results\n- results = job.get_results()\n- assert len(results) == 3\n- for colname, attrs in column_attrs.items():\n- assert results[colname].attrs_equal(attrs)\n-\n- def test_cone_search_async(self, column_attrs, mock_querier_async):\n- radius = 1.0 * u.deg\n+def test_show_message():\n+ connHandler = DummyConnHandler()\n+\n+ dummy_response = DummyResponse(200)\n+\n+ message_text = \"1653401204784D[type: -100,-1]=Gaia dev is under maintenance\"\n+\n+ dummy_response.set_data(method='GET', body=message_text)\n+ connHandler.set_default_response(dummy_response)\n+\n+ # show_server_messages\n+ tableRequest = 'notification?action=GetNotifications'\n+ connHandler.set_response(tableRequest, dummy_response)\n+\n+ tapplus = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n+ GaiaClass(connHandler, tapplus, show_server_messages=True)\n+\n+def test_query_object(column_attrs, mock_querier):\n+ with pytest.raises(ValueError) as err:\n+ mock_querier.query_object(skycoord)\n+ assert \"Missing required argument: width\" in err.value.args[0]\n+\n+ width = 12 * u.deg\n+\n+ with pytest.raises(ValueError) as err:\n+ mock_querier.query_object(skycoord, width=width)\n+ assert \"Missing required argument: height\" in err.value.args[0]\n+\n+ table = 
mock_querier.query_object(skycoord, width=width, height=10 * u.deg)\n+ assert len(table) == 3\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n+ # by radius\n+ table = mock_querier.query_object(skycoord, radius=1 * u.deg)\n+ assert len(table) == 3\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n+\n+def test_query_object_async(column_attrs, mock_querier_async):\n+ table = mock_querier_async.query_object_async(\n+ skycoord, width=12 * u.deg, height=10 * u.deg\n+ )\n+ assert len(table) == 3\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n+ # by radius\n+ table = mock_querier_async.query_object_async(skycoord, radius=1 * u.deg)\n+ assert len(table) == 3\n+ for colname, attrs in column_attrs.items():\n+ assert table[colname].attrs_equal(attrs)\n+\n+def test_cone_search_sync(column_attrs, mock_querier):\n+ job = mock_querier.cone_search(skycoord, 1 * u.deg)\n+ assert job.async_ is False\n+ assert job.get_phase() == \"COMPLETED\"\n+ assert job.failed is False\n+ # results\n+ results = job.get_results()\n+ assert len(results) == 3\n+ for colname, attrs in column_attrs.items():\n+ assert results[colname].attrs_equal(attrs)\n+\n+def test_cone_search_async(column_attrs, mock_querier_async):\n+ radius = 1.0 * u.deg\n+ job = mock_querier_async.cone_search_async(skycoord, radius)\n+ assert job.async_ is True\n+ assert job.get_phase() == \"COMPLETED\"\n+ assert job.failed is False\n+ # results\n+ results = job.get_results()\n+ assert len(results) == 3\n+ for colname, attrs in column_attrs.items():\n+ assert results[colname].attrs_equal(attrs)\n+\n+ # Regression test for #2093 and #2099 - changing the MAIN_GAIA_TABLE\n+ # had no effect.\n+ # The preceding tests should have used the default value.\n+ assert 'gaiadr2.gaia_source' in job.parameters['query']\n+ with conf.set_temp(\"MAIN_GAIA_TABLE\", \"name_from_conf\"):\n+ job = mock_querier_async.cone_search_async(skycoord, radius)\n+ assert \"name_from_conf\" in job.parameters[\"query\"]\n+ # Changing the value through the class should overrule conf.\n+ mock_querier_async.MAIN_GAIA_TABLE = \"name_from_class\"\n job = mock_querier_async.cone_search_async(skycoord, radius)\n- assert job.async_ is True\n- assert job.get_phase() == \"COMPLETED\"\n- assert job.failed is False\n- # results\n- results = job.get_results()\n- assert len(results) == 3\n- for colname, attrs in column_attrs.items():\n- assert results[colname].attrs_equal(attrs)\n-\n- # Regression test for #2093 and #2099 - changing the MAIN_GAIA_TABLE\n- # had no effect.\n- # The preceding tests should have used the default value.\n- assert 'gaiadr2.gaia_source' in job.parameters['query']\n- with conf.set_temp(\"MAIN_GAIA_TABLE\", \"name_from_conf\"):\n- job = mock_querier_async.cone_search_async(skycoord, radius)\n- assert \"name_from_conf\" in job.parameters[\"query\"]\n- # Changing the value through the class should overrule conf.\n- mock_querier_async.MAIN_GAIA_TABLE = \"name_from_class\"\n- job = mock_querier_async.cone_search_async(skycoord, radius)\n- assert \"name_from_class\" in job.parameters[\"query\"]\n-\n- def test_load_data(self):\n- dummy_handler = DummyTapHandler()\n- tap = GaiaClass(dummy_handler, dummy_handler, show_server_messages=False)\n-\n- ids = \"1,2,3,4\"\n- retrieval_type = \"epoch_photometry\"\n- verbose = True\n- output_file = os.path.abspath(\"output_file\")\n- path_to_end_with = os.path.join(\"gaia\", \"test\", \"output_file\")\n- if not 
output_file.endswith(path_to_end_with):\n- output_file = os.path.abspath(path_to_end_with)\n-\n- tap.load_data(ids=ids,\n- retrieval_type=retrieval_type,\n- valid_data=True,\n- verbose=verbose,\n- output_file=output_file)\n-\n- parameters = {\n- \"params_dict\": {\n- \"VALID_DATA\": \"true\",\n- \"ID\": ids,\n- \"FORMAT\": \"votable\",\n- \"RETRIEVAL_TYPE\": retrieval_type,\n- \"DATA_STRUCTURE\": \"INDIVIDUAL\",\n- \"USE_ZIP_ALWAYS\": \"true\",\n- },\n- \"output_file\": dummy_handler._DummyTapHandler__parameters[\"output_file\"],\n- \"verbose\": verbose,\n- }\n- dummy_handler.check_call('load_data', parameters)\n-\n- def test_get_datalinks(self):\n- dummy_handler = DummyTapHandler()\n- tap = GaiaClass(dummy_handler, dummy_handler, show_server_messages=False)\n- ids = [\"1\", \"2\", \"3\", \"4\"]\n- verbose = True\n- tap.get_datalinks(ids, verbose)\n- dummy_handler.check_call(\"get_datalinks\", {\"ids\": ids, \"verbose\": verbose})\n-\n- def test_xmatch(self, mock_querier_async):\n- # missing table A\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results',\n- )\n- assert \"Table name A argument is mandatory\" in err.value.args[0]\n- # missing schema A\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results',\n- )\n- assert \"Not found schema name in full qualified table A: 'tableA'\" \\\n- in err.value.args[0]\n- # missing table B\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='schemaA.tableA',\n- results_table_name='results',\n- )\n- assert \"Table name B argument is mandatory\" in err.value.args[0]\n- # missing schema B\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='tableB',\n- results_table_name='results',\n- )\n- assert \"Not found schema name in full qualified table B: 'tableB'\" \\\n- in err.value.args[0]\n- # missing results table\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- )\n- assert \"Results table name argument is mandatory\" in err.value.args[0]\n- # wrong results table (with schema)\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='schema.results',\n- )\n- assert \"Please, do not specify schema for 'results_table_name'\" \\\n- in err.value.args[0]\n- # radius < 0.1\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results',\n- radius=0.01,\n- )\n- assert \"Invalid radius value. Found 0.01, valid range is: 0.1 to 10.0\" \\\n- in err.value.args[0]\n- # radius > 10.0\n- with pytest.raises(ValueError) as err:\n- mock_querier_async.cross_match(\n- full_qualified_table_name_a='schemaA.tableA',\n- full_qualified_table_name_b='schemaB.tableB',\n- results_table_name='results',\n- radius=10.1\n- )\n- assert \"Invalid radius value. Found 10.1, valid range is: 0.1 to 10.0\" \\\n- in err.value.args[0]\n- job = mock_querier_async.cross_match(\n+ assert \"name_from_class\" in job.parameters[\"query\"]\n+\n+def test_load_data():\n+ dummy_handler = DummyTapHandler()\n+ tap = GaiaClass(dummy_handler, dummy_handler, show_server_messages=False)\n+\n+ ids = \"1,2,3,4\"\n+ retrieval_type = \"epoch_photometry\"\n+ verbose = True\n+ output_file = os.path.abspath(\"output_file\")\n+ path_to_end_with = os.path.join(\"gaia\", \"test\", \"output_file\")\n+ if not output_file.endswith(path_to_end_with):\n+ output_file = os.path.abspath(path_to_end_with)\n+\n+ tap.load_data(ids=ids,\n+ retrieval_type=retrieval_type,\n+ valid_data=True,\n+ verbose=verbose,\n+ output_file=output_file)\n+\n+ parameters = {\n+ \"params_dict\": {\n+ \"VALID_DATA\": \"true\",\n+ \"ID\": ids,\n+ \"FORMAT\": \"votable\",\n+ \"RETRIEVAL_TYPE\": retrieval_type,\n+ \"DATA_STRUCTURE\": \"INDIVIDUAL\",\n+ \"USE_ZIP_ALWAYS\": \"true\",\n+ },\n+ \"output_file\": dummy_handler._DummyTapHandler__parameters[\"output_file\"],\n+ \"verbose\": verbose,\n+ }\n+ dummy_handler.check_call('load_data', parameters)\n+\n+def test_get_datalinks():\n+ dummy_handler = DummyTapHandler()\n+ tap = GaiaClass(dummy_handler, dummy_handler, show_server_messages=False)\n+ ids = [\"1\", \"2\", \"3\", \"4\"]\n+ verbose = True\n+ tap.get_datalinks(ids, verbose)\n+ dummy_handler.check_call(\"get_datalinks\", {\"ids\": ids, \"verbose\": verbose})\n+\n+def test_xmatch(mock_querier_async):\n+ # missing table A\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ )\n+ assert \"Table name A argument is mandatory\" in err.value.args[0]\n+ # missing schema A\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ )\n+ assert \"Not found schema name in full qualified table A: 'tableA'\" \\\n+ in err.value.args[0]\n+ # missing table B\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ results_table_name='results',\n+ )\n+ assert \"Table name B argument is mandatory\" in err.value.args[0]\n+ # missing schema B\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='tableB',\n+ results_table_name='results',\n+ )\n+ assert \"Not found schema name in full qualified table B: 'tableB'\" \\\n+ in err.value.args[0]\n+ # missing results table\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ )\n+ assert \"Results table name argument is mandatory\" in err.value.args[0]\n+ # wrong results table (with schema)\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='schema.results',\n+ )\n+ assert \"Please, do not specify schema for 'results_table_name'\" \\\n+ in err.value.args[0]\n+ # radius < 0.1\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n full_qualified_table_name_a='schemaA.tableA',\n full_qualified_table_name_b='schemaB.tableB',\n results_table_name='results',\n+ radius=0.01,\n )\n- assert job.async_ is True\n- assert job.get_phase() == \"COMPLETED\"\n- assert job.failed is False\n- job = mock_querier_async.cross_match(\n+ assert \"Invalid radius value. Found 0.01, valid range is: 0.1 to 10.0\" \\\n+ in err.value.args[0]\n+ # radius > 10.0\n+ with pytest.raises(ValueError) as err:\n+ mock_querier_async.cross_match(\n full_qualified_table_name_a='schemaA.tableA',\n full_qualified_table_name_b='schemaB.tableB',\n results_table_name='results',\n- background=True,\n+ radius=10.1\n )\n- assert job.async_ is True\n- assert job.get_phase() == \"EXECUTING\"\n- assert job.failed is False\n-\n- @patch.object(TapPlus, 'login')\n- def test_login(self, mock_login):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- tap.login(\"user\", \"password\")\n- assert (mock_login.call_count == 2)\n- mock_login.side_effect = HTTPError(\"Login error\")\n- tap.login(\"user\", \"password\")\n- assert (mock_login.call_count == 3)\n-\n- @patch.object(TapPlus, 'login_gui')\n- @patch.object(TapPlus, 'login')\n- def test_login_gui(self, mock_login_gui, mock_login):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- tap.login_gui()\n- assert (mock_login_gui.call_count == 1)\n- mock_login_gui.side_effect = HTTPError(\"Login error\")\n- tap.login(\"user\", \"password\")\n- assert (mock_login.call_count == 1)\n-\n- @patch.object(TapPlus, 'logout')\n- def test_logout(self, mock_logout):\n- conn_handler = DummyConnHandler()\n- tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n- tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n- tap.logout()\n- assert (mock_logout.call_count == 2)\n- mock_logout.side_effect = HTTPError(\"Login error\")\n- tap.logout()\n- assert (mock_logout.call_count == 3)\n+ assert \"Invalid radius value. Found 10.1, valid range is: 0.1 to 10.0\" \\\n+ in err.value.args[0]\n+ job = mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ )\n+ assert job.async_ is True\n+ assert job.get_phase() == \"COMPLETED\"\n+ assert job.failed is False\n+ job = mock_querier_async.cross_match(\n+ full_qualified_table_name_a='schemaA.tableA',\n+ full_qualified_table_name_b='schemaB.tableB',\n+ results_table_name='results',\n+ background=True,\n+ )\n+ assert job.async_ is True\n+ assert job.get_phase() == \"EXECUTING\"\n+ assert job.failed is False\n+\n+@patch.object(TapPlus, 'login')\n+def test_login(mock_login):\n+ conn_handler = DummyConnHandler()\n+ tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n+ tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n+ tap.login(\"user\", \"password\")\n+ assert (mock_login.call_count == 2)\n+ mock_login.side_effect = HTTPError(\"Login error\")\n+ tap.login(\"user\", \"password\")\n+ assert (mock_login.call_count == 3)\n+\n+@patch.object(TapPlus, 'login_gui')\n+@patch.object(TapPlus, 'login')\n+def test_login_gui(mock_login_gui, mock_login):\n+ conn_handler = DummyConnHandler()\n+ tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n+ tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n+ tap.login_gui()\n+ assert (mock_login_gui.call_count == 1)\n+ mock_login_gui.side_effect = HTTPError(\"Login error\")\n+ tap.login(\"user\", \"password\")\n+ assert (mock_login.call_count == 1)\n+\n+@patch.object(TapPlus, 'logout')\n+def test_logout(mock_logout):\n+ conn_handler = DummyConnHandler()\n+ tapplus = TapPlus(\"http://test:1111/tap\", connhandler=conn_handler)\n+ tap = GaiaClass(conn_handler, tapplus, show_server_messages=False)\n+ tap.logout()\n+ assert (mock_logout.call_count == 2)\n+ mock_logout.side_effect = HTTPError(\"Login error\")\n+ tap.logout()\n+ assert (mock_logout.call_count == 3)\n"
}
] |
766b512c71e32d908bb84ba1b7b60b2b2f205437 | astropy/astroquery | 09.01.2023 18:59:41 | BSD 3-Clause New or Revised License | Remove `utils.tap.conn.TapConn.url_encode()`
The removed method simply called the standard library
`urllib.parse.urlencode()`. | [
{
"change_type": "MODIFY",
"old_path": "astroquery/esa/hubble/core.py",
"new_path": "astroquery/esa/hubble/core.py",
"diff": "@@ -8,6 +8,7 @@ European Space Astronomy Centre (ESAC)\n European Space Agency (ESA)\n \n \"\"\"\n+from urllib.parse import urlencode\n \n from astropy import units\n from astropy.coordinates import SkyCoord\n@@ -503,7 +504,7 @@ class ESAHubbleClass(BaseQuery):\n \n subContext = conf.EHST_TARGET_ACTION\n connHandler = self._tap._TapPlus__getconnhandler()\n- data = connHandler.url_encode(params)\n+ data = urlencode(params)\n target_response = connHandler.execute_secure(subContext, data, True)\n for line in target_response:\n target_result = json.loads(line.decode(\"utf-8\"))\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/esa/jwst/core.py",
"new_path": "astroquery/esa/jwst/core.py",
"diff": "@@ -16,6 +16,7 @@ import shutil\n import tarfile\n import zipfile\n from datetime import datetime\n+from urllib.parse import urlencode\n \n from astropy import log\n from astropy import units\n@@ -667,9 +668,8 @@ class JwstClass(BaseQuery):\n MAST token to have access to propietary data\n \"\"\"\n subContext = conf.JWST_TOKEN\n- args = {\"token\": token}\n+ data = urlencode({\"token\": token})\n connHandler = self.__jwsttap._TapPlus__getconnhandler()\n- data = connHandler.url_encode(args)\n response = connHandler.execute_secure(subContext, data, True)\n if response.status == 403:\n print(\"ERROR: MAST tokens cannot be assigned or requested by anonymous users\")\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/gaia/tests/DummyTapHandler.py",
"new_path": "astroquery/gaia/tests/DummyTapHandler.py",
"diff": "@@ -14,8 +14,6 @@ Created on 30 jun. 2016\n \n \n \"\"\"\n-from urllib.parse import urlencode\n-\n CONTENT_TYPE_POST_DEFAULT = \"application/x-www-form-urlencoded\"\n \n \n@@ -320,6 +318,3 @@ class DummyTapHandler:\n self.__invokedMethod = 'is_valid_user'\n self.__parameters['user_id'] = user_id\n self.__parameters['verbose'] = verbose\n-\n- def url_encode(self, data):\n- return urlencode(data)\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/conn/tapconn.py",
"new_path": "astroquery/utils/tap/conn/tapconn.py",
"diff": "@@ -24,8 +24,6 @@ except ImportError:\n import mimetypes\r\n import time\r\n \r\n-from urllib.parse import urlencode\r\n-\r\n from astroquery.utils.tap.xmlparser import utils\r\n from astroquery.utils.tap import taputils\r\n from astroquery import version\r\n@@ -474,16 +472,6 @@ class TapConn:\n \"\"\"\r\n return self.__currentReason\r\n \r\n- def url_encode(self, data):\r\n- \"\"\"Encodes the provided dictionary\r\n-\r\n- Parameters\r\n- ----------\r\n- data : dictionary, mandatory\r\n- dictionary to be encoded\r\n- \"\"\"\r\n- return urlencode(data)\r\n-\r\n def find_header(self, headers, key):\r\n \"\"\"Searches for the specified keyword\r\n \r\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/conn/tests/DummyConnHandler.py",
"new_path": "astroquery/utils/tap/conn/tests/DummyConnHandler.py",
"diff": "@@ -16,8 +16,6 @@ Created on 30 jun. 2016\n \"\"\"\n from astroquery.utils.tap import taputils\n \n-from urllib.parse import urlencode\n-\n import requests\n \n \n@@ -147,9 +145,6 @@ class DummyConnHandler:\n else:\n return isError\n \n- def url_encode(self, data):\n- return urlencode(data)\n-\n def get_suitable_extension(self, headers):\n return self.fileExt\n \n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/core.py",
"new_path": "astroquery/utils/tap/core.py",
"diff": "@@ -13,6 +13,8 @@ European Space Agency (ESA)\n Created on 30 jun. 2016\r\n Modified on 1 jun. 2021 by mhsarmiento\r\n \"\"\"\r\n+from urllib.parse import urlencode\n+\n from astroquery.utils.tap import taputils\r\n from astroquery.utils.tap.conn.tapconn import TapConn\r\n from astroquery.utils.tap.xmlparser.tableSaxParser import TableSaxParser\r\n@@ -554,7 +556,7 @@ class Tap:\n return jobs\r\n \r\n def __appendData(self, args):\r\n- data = self.__connHandler.url_encode(args)\r\n+ data = urlencode(args)\n result = \"\"\r\n firtsTime = True\r\n for k in data:\r\n@@ -633,9 +635,8 @@ class Tap:\n args['PHASE'] = 'RUN'\r\n if name is not None:\r\n args['jobname'] = name\r\n- data = self.__connHandler.url_encode(args)\r\n response = self.__connHandler.execute_tappost(subcontext=context,\r\n- data=data,\r\n+ data=urlencode(args),\n verbose=verbose)\r\n if verbose:\r\n print(response.status, response.reason)\r\n@@ -847,7 +848,7 @@ class TapPlus(Tap):\n connHandler = self.__getconnhandler()\r\n if not isinstance(params_dict, dict):\r\n raise ValueError(\"Parameters dictionary expected\")\r\n- data = connHandler.url_encode(params_dict)\r\n+ data = urlencode(params_dict)\n if verbose:\r\n print(f\"Data request: {data}\")\r\n response = connHandler.execute_datapost(data=data, verbose=verbose)\r\n@@ -1371,8 +1372,7 @@ class TapPlus(Tap):\n \"username\": usr,\r\n \"password\": pwd}\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_secure(subContext, data, verbose)\r\n+ response = connHandler.execute_secure(subContext, urlencode(args), verbose)\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -1568,8 +1568,7 @@ class TapPlus(Tap):\n \"DELETE\": \"TRUE\",\r\n \"FORCE_REMOVAL\": \"FALSE\"}\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_upload(data, verbose=verbose)\r\n+ response = connHandler.execute_upload(urlencode(args), verbose=verbose)\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -1620,8 +1619,7 @@ class TapPlus(Tap):\n args = self.get_args_4_rename_table(table_name, table_name, new_column_names_dict)\r\n \r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_table_tool(data, verbose=verbose)\r\n+ response = connHandler.execute_table_tool(urlencode(args), verbose=verbose)\n \r\n if verbose:\r\n print(response.status, response.reason)\r\n@@ -1724,8 +1722,7 @@ class TapPlus(Tap):\n list_of_changes)\r\n \r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_table_edit(data, verbose=verbose)\r\n+ response = connHandler.execute_table_edit(urlencode(args), verbose=verbose)\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -1897,8 +1894,7 @@ class TapPlus(Tap):\n \"DEC\": str(dec_column_name),\r\n }\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_table_edit(data, verbose=verbose)\r\n+ response = connHandler.execute_table_edit(urlencode(args), verbose=verbose)\n isError = connHandler.check_launch_response_status(response,\r\n verbose,\r\n 200)\r\n@@ -1995,10 +1991,8 @@ class TapPlus(Tap):\n flag to display information about the process\r\n \"\"\"\r\n subContext = \"logout\"\r\n- args = {}\r\n connHandler = self.__getconnhandler()\r\n- data = connHandler.url_encode(args)\r\n- response = connHandler.execute_secure(subContext, data)\r\n+ response = connHandler.execute_secure(subContext, \"\")\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/model/job.py",
"new_path": "astroquery/utils/tap/model/job.py",
"diff": "@@ -16,6 +16,7 @@ Created on 30 jun. 2016\n \"\"\"\r\n \r\n import time\r\n+from urllib.parse import urlencode\n \r\n from astroquery.utils.tap.model import modelutils\r\n from astroquery.utils.tap.xmlparser import utils\r\n@@ -110,12 +111,9 @@ class Job:\n def __change_phase(self, phase, verbose=False):\r\n if self._phase == 'PENDING':\r\n context = f\"async/{self.jobid}/phase\"\r\n- args = {\r\n- \"PHASE\": str(phase)}\r\n- data = self.connHandler.url_encode(args)\r\n- response = self.connHandler.execute_tappost(subcontext=context,\r\n- data=data,\r\n- verbose=verbose)\r\n+ response = self.connHandler.execute_tappost(\n+ subcontext=context, data=urlencode({\"PHASE\": phase}), verbose=verbose\n+ )\n if verbose:\r\n print(response.status, response.reason)\r\n print(response.getheaders())\r\n@@ -150,11 +148,8 @@ class Job:\n if self._phase == 'PENDING':\r\n # send post parameter/value\r\n context = f\"async/{self.jobid}\"\r\n- args = {\r\n- name: str(value)}\r\n- data = self.connHandler.url_encode(args)\r\n response = self.connHandler.execute_tappost(subcontext=context,\r\n- data=data,\r\n+ data=urlencode({name: value}),\n verbose=verbose)\r\n if verbose:\r\n print(response.status, response.reason)\r\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/tests/test_tap.py",
"new_path": "astroquery/utils/tap/tests/test_tap.py",
"diff": "@@ -14,6 +14,7 @@ Created on 30 jun. 2016\n \"\"\"\n import os\n from unittest.mock import patch\n+from urllib.parse import quote_plus, urlencode\n \n import numpy as np\n import pytest\n@@ -156,17 +157,13 @@ def test_launch_sync_job():\n jobData = utils.read_file_content(jobDataFile)\n responseLaunchJob.set_data(method='POST', body=jobData)\n query = 'select top 5 * from table'\n- dTmp = {\"q\": query}\n- dTmpEncoded = connHandler.url_encode(dTmp)\n- p = dTmpEncoded.find(\"=\")\n- q = dTmpEncoded[p + 1:]\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n \"LANG\": \"ADQL\",\n \"FORMAT\": \"votable\",\n \"tapclient\": str(tap.tap_client_id),\n \"PHASE\": \"RUN\",\n- \"QUERY\": str(q)}\n+ \"QUERY\": quote_plus(query)}\n sortedKey = taputils.taputil_create_sorted_dict_key(dictTmp)\n jobRequest = f\"sync?{sortedKey}\"\n connHandler.set_response(jobRequest, responseLaunchJob)\n@@ -220,17 +217,13 @@ def test_launch_sync_job_redirect():\n ]\n responseLaunchJob.set_data(method='POST')\n query = 'select top 5 * from table'\n- dTmp = {\"q\": query}\n- dTmpEncoded = connHandler.url_encode(dTmp)\n- p = dTmpEncoded.find(\"=\")\n- q = dTmpEncoded[p + 1:]\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n \"LANG\": \"ADQL\",\n \"FORMAT\": \"votable\",\n \"tapclient\": str(tap.tap_client_id),\n \"PHASE\": \"RUN\",\n- \"QUERY\": str(q)}\n+ \"QUERY\": quote_plus(query)}\n sortedKey = taputils.taputil_create_sorted_dict_key(dictTmp)\n jobRequest = f\"sync?{sortedKey}\"\n connHandler.set_response(jobRequest, responseLaunchJob)\n@@ -844,9 +837,7 @@ def test_rename_table():\n \"new_table_name\": newTableName,\n \"table_name\": tableName,\n }\n- data = connHandler.url_encode(dictArgs)\n- req = f\"TableTool?{data}\"\n- connHandler.set_response(req, responseRenameTable)\n+ connHandler.set_response(f\"TableTool?{urlencode(dictArgs)}\", responseRenameTable)\n tap.rename_table(table_name=tableName, new_table_name=newTableName, new_column_names_dict=newColumnNames)\n \n \n"
}
] |
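The record above reduces to one recurring pattern: every call site that went through the removed `TapConn.url_encode()` wrapper now calls `urllib.parse.urlencode()` directly. A minimal sketch of that pattern, assuming nothing beyond the Python standard library (the argument dictionary is illustrative, borrowed from the TAP parameters visible in the diff, and `conn_handler` is a hypothetical name, not the astroquery object):

```python
from urllib.parse import urlencode

# Illustrative TAP-style parameters; any mapping of strings works the same way.
args = {"REQUEST": "doQuery", "LANG": "ADQL", "PHASE": "RUN"}

# Before: data = conn_handler.url_encode(args)   # thin wrapper, now removed
# After: call the standard library directly.
data = urlencode(args)
print(data)  # REQUEST=doQuery&LANG=ADQL&PHASE=RUN
```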
90c5652a04ed8108b6b16ab9d2be54165bb4e37f | astropy/astroquery | 09.01.2023 19:01:34 | BSD 3-Clause New or Revised License | Remove a utility function for reading test data
`astroquery.utils.tap.xmlparser.utils.read_file_content()` has been
replaced with the standard library `pathlib.Path.read_text()`.
Furthermore, in the modules where multiple test functions previously
used `read_file_content()` to read the data files, they are now read
once at the module level instead. | [
{
"change_type": "MODIFY",
"old_path": "astroquery/esa/jwst/tests/test_jwsttap.py",
"new_path": "astroquery/esa/jwst/tests/test_jwsttap.py",
"diff": "@@ -10,6 +10,7 @@ European Space Agency (ESA)\n \"\"\"\n import os\n import shutil\n+from pathlib import Path\n from unittest.mock import MagicMock\n \n import astropy.units as u\n@@ -28,12 +29,14 @@ from astroquery.simbad import Simbad\n from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler\n from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse\n from astroquery.utils.tap.core import TapPlus\n-from astroquery.utils.tap.xmlparser import utils\n from astroquery.vizier import Vizier\n \n from astroquery.esa.jwst import conf\n \n \n+JOB_DATA = (Path(__file__).with_name(\"data\") / \"job_1.vot\").read_text()\n+\n+\n def data_path(filename):\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n return os.path.join(data_dir, filename)\n@@ -234,9 +237,7 @@ class TestTap:\n # Launch response: we use default response because the\n # query contains decimals\n responseLaunchJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseLaunchJob.set_data(method='POST', body=jobData)\n+ responseLaunchJob.set_data(method='POST', body=JOB_DATA)\n # The query contains decimals: force default response\n connHandler.set_default_response(responseLaunchJob)\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n@@ -379,9 +380,7 @@ class TestTap:\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=JOB_DATA)\n req = \"async/\" + jobid + \"/results/result\"\n connHandler.set_response(req, responseResultsJob)\n sc = SkyCoord(ra=29.0, dec=15.0, unit=(u.degree, u.degree),\n@@ -442,9 +441,7 @@ class TestTap:\n # Launch response: we use default response because the\n # query contains decimals\n responseLaunchJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseLaunchJob.set_data(method='POST', body=jobData)\n+ responseLaunchJob.set_data(method='POST', body=JOB_DATA)\n ra = 19.0\n dec = 20.0\n sc = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame='icrs')\n@@ -544,9 +541,7 @@ class TestTap:\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=JOB_DATA)\n req = \"async/\" + jobid + \"/results/result\"\n connHandler.set_response(req, responseResultsJob)\n job = tap.cone_search(sc, radius, async_job=True)\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/gaia/tests/test_gaiatap.py",
"new_path": "astroquery/gaia/tests/test_gaiatap.py",
"diff": "@@ -30,11 +30,10 @@ from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse\n import astropy.units as u\n from astropy.coordinates.sky_coordinate import SkyCoord\n import numpy as np\n-from astroquery.utils.tap.xmlparser import utils\n from astroquery.utils.tap.core import TapPlus\n \n \n-job_data = utils.read_file_content(Path(__file__).parent.joinpath(\"data\", \"job_1.vot\"))\n+job_data = (Path(__file__).with_name(\"data\") / \"job_1.vot\").read_text()\n \n skycoord = SkyCoord(ra=19 * u.deg, dec=20 * u.deg, frame=\"icrs\")\n \n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/model/tests/test_job.py",
"new_path": "astroquery/utils/tap/model/tests/test_job.py",
"diff": "@@ -14,18 +14,13 @@ Created on 30 jun. 2016\n \n \n \"\"\"\n-import os\n+from pathlib import Path\n+\n import pytest\n \n from astroquery.utils.tap.model.job import Job\n from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler\n from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse\n-from astroquery.utils.tap.xmlparser import utils\n-\n-\n-def data_path(filename):\n- data_dir = os.path.join(os.path.dirname(__file__), 'data')\n- return os.path.join(data_dir, filename)\n \n \n def test_job_basic():\n@@ -53,9 +48,9 @@ def test_job_get_results(capsys, tmpdir):\n \n responseCheckPhase.set_status_code(200)\n responseGetData = DummyResponse(500)\n- jobContentFileName = data_path('result_1.vot')\n- jobContent = utils.read_file_content(jobContentFileName)\n- responseGetData.set_data(method='GET', body=jobContent)\n+ responseGetData.set_data(\n+ method=\"GET\",\n+ body=(Path(__file__).with_name(\"data\") / \"result_1.vot\").read_text())\n dataRequest = f\"async/{jobid}/results/result\"\n connHandler.set_response(dataRequest, responseGetData)\n \n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/tests/test_tap.py",
"new_path": "astroquery/utils/tap/tests/test_tap.py",
"diff": "@@ -12,7 +12,7 @@ European Space Agency (ESA)\n \n Created on 30 jun. 2016\n \"\"\"\n-import os\n+from pathlib import Path\n from unittest.mock import patch\n from urllib.parse import quote_plus, urlencode\n \n@@ -25,22 +25,17 @@ from astroquery.utils.tap.model.tapcolumn import TapColumn\n from astroquery.utils.tap.conn.tests.DummyConnHandler import DummyConnHandler\n from astroquery.utils.tap.conn.tests.DummyResponse import DummyResponse\n from astroquery.utils.tap.core import TapPlus\n-from astroquery.utils.tap.xmlparser import utils\n from astroquery.utils.tap import taputils\n \n \n-def data_path(filename):\n- data_dir = os.path.join(os.path.dirname(__file__), 'data')\n- return os.path.join(data_dir, filename)\n+TEST_DATA = {f.name: f.read_text() for f in Path(__file__).with_name(\"data\").iterdir()}\n \n \n def test_load_tables():\n connHandler = DummyConnHandler()\n tap = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n responseLoadTable = DummyResponse(500)\n- tableDataFile = data_path('test_tables.xml')\n- tableData = utils.read_file_content(tableDataFile)\n- responseLoadTable.set_data(method='GET', body=tableData)\n+ responseLoadTable.set_data(method='GET', body=TEST_DATA[\"test_tables.xml\"])\n tableRequest = \"tables\"\n connHandler.set_response(tableRequest, responseLoadTable)\n with pytest.raises(Exception):\n@@ -77,9 +72,7 @@ def test_load_tables_parameters():\n connHandler = DummyConnHandler()\n tap = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n responseLoadTable = DummyResponse(200)\n- tableDataFile = data_path('test_tables.xml')\n- tableData = utils.read_file_content(tableDataFile)\n- responseLoadTable.set_data(method='GET', body=tableData)\n+ responseLoadTable.set_data(method='GET', body=TEST_DATA[\"test_tables.xml\"])\n tableRequest = \"tables\"\n connHandler.set_response(tableRequest, responseLoadTable)\n \n@@ -125,9 +118,7 @@ def test_load_table():\n tap.load_table()\n \n responseLoadTable = DummyResponse(500)\n- tableDataFile = data_path('test_table1.xml')\n- tableData = utils.read_file_content(tableDataFile)\n- responseLoadTable.set_data(method='GET', body=tableData)\n+ responseLoadTable.set_data(method='GET', body=TEST_DATA[\"test_table1.xml\"])\n tableSchema = \"public\"\n tableName = \"table1\"\n fullQualifiedTableName = f\"{tableSchema}.{tableName}\"\n@@ -153,9 +144,7 @@ def test_launch_sync_job():\n connHandler = DummyConnHandler()\n tap = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n responseLaunchJob = DummyResponse(500)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseLaunchJob.set_data(method='POST', body=jobData)\n+ responseLaunchJob.set_data(method='POST', body=TEST_DATA[\"job_1.vot\"])\n query = 'select top 5 * from table'\n dictTmp = {\n \"REQUEST\": \"doQuery\",\n@@ -229,9 +218,7 @@ def test_launch_sync_job_redirect():\n connHandler.set_response(jobRequest, responseLaunchJob)\n # Results response\n responseResultsJob = DummyResponse(500)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=TEST_DATA[\"job_1.vot\"])\n connHandler.set_response(resultsReq, responseResultsJob)\n \n with pytest.raises(Exception):\n@@ -316,9 +303,7 @@ def test_launch_async_job():\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(500)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=TEST_DATA[\"job_1.vot\"])\n req = f\"async/{jobid}/results/result\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -398,9 +383,7 @@ def test_start_job():\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=TEST_DATA[\"job_1.vot\"])\n req = f\"async/{jobid}/results/result\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -490,9 +473,7 @@ def test_job_parameters():\n connHandler.set_response(req, responsePhase)\n # Results response\n responseResultsJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=TEST_DATA[\"job_1.vot\"])\n req = f\"async/{jobid}/results/result\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -526,9 +507,7 @@ def test_list_async_jobs():\n connHandler = DummyConnHandler()\n tap = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n response = DummyResponse(500)\n- jobDataFile = data_path('jobs_list.xml')\n- jobData = utils.read_file_content(jobDataFile)\n- response.set_data(method='GET', body=jobData)\n+ response.set_data(method='GET', body=TEST_DATA[\"jobs_list.xml\"])\n req = \"async\"\n connHandler.set_response(req, response)\n with pytest.raises(Exception):\n@@ -549,9 +528,7 @@ def test_data():\n data_context=\"data\",\n connhandler=connHandler)\n responseResultsJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=TEST_DATA[\"job_1.vot\"])\n req = \"?ID=1%2C2&format=votable\"\n connHandler.set_response(req, responseResultsJob)\n req = \"?ID=1%2C2\"\n@@ -585,9 +562,7 @@ def test_datalink():\n datalink_context=\"datalink\",\n connhandler=connHandler)\n responseResultsJob = DummyResponse(200)\n- jobDataFile = data_path('job_1.vot')\n- jobData = utils.read_file_content(jobDataFile)\n- responseResultsJob.set_data(method='GET', body=jobData)\n+ responseResultsJob.set_data(method='GET', body=TEST_DATA[\"job_1.vot\"])\n req = \"links?ID=1,2\"\n connHandler.set_response(req, responseResultsJob)\n \n@@ -750,9 +725,7 @@ def test_update_user_table():\n connHandler = DummyConnHandler()\n tap = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n dummyResponse = DummyResponse(200)\n- tableDataFile = data_path('test_table_update.xml')\n- tableData = utils.read_file_content(tableDataFile)\n- dummyResponse.set_data(method='GET', body=tableData)\n+ dummyResponse.set_data(method='GET', body=TEST_DATA[\"test_table_update.xml\"])\n tableRequest = f\"tables?tables={tableName}\"\n connHandler.set_response(tableRequest, dummyResponse)\n \n@@ -818,9 +791,7 @@ def test_rename_table():\n connHandler = DummyConnHandler()\n tap = TapPlus(\"http://test:1111/tap\", connhandler=connHandler)\n dummyResponse = DummyResponse(200)\n- tableDataFile = data_path('test_table_rename.xml')\n- tableData = utils.read_file_content(tableDataFile)\n- dummyResponse.set_data(method='GET', body=tableData)\n+ dummyResponse.set_data(method='GET', body=TEST_DATA[\"test_table_rename.xml\"])\n \n with pytest.raises(Exception):\n tap.rename_table()\n"
},
{
"change_type": "MODIFY",
"old_path": "astroquery/utils/tap/xmlparser/utils.py",
"new_path": "astroquery/utils/tap/xmlparser/utils.py",
"diff": "@@ -53,13 +53,6 @@ def get_suitable_astropy_format(output_format):\n return output_format\n \n \n-def read_file_content(file_path):\n- file_handler = open(file_path, 'r')\n- file_content = file_handler.read()\n- file_handler.close()\n- return file_content\n-\n-\n def modify_unrecognized_table_units(table):\n \"\"\"Modifies the units of an input table column in place\n \"\"\"\n"
}
] |
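As a companion to the record above: the updated test modules read each fixture file once at import time with `pathlib` instead of calling a per-test helper. A minimal self-contained sketch of that pattern (the `data` directory and the `job_1.vot` file name are assumptions for illustration, mirroring what the diff shows rather than reproducing the astroquery sources):

```python
from pathlib import Path

# Read every fixture in the sibling "data" directory once, at module level.
# Assumes such a directory exists next to this test module and contains only files.
TEST_DATA = {f.name: f.read_text() for f in Path(__file__).with_name("data").iterdir()}


def test_uses_preloaded_fixture():
    # Each test reuses the preloaded text instead of re-opening the file.
    body = TEST_DATA["job_1.vot"]
    assert body.strip() != ""
```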