query (string, 9–9.05k chars) | document (string, 10–222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4–10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Tests gotoField if there is a mismatch between MCP and guider. | def test_goto_field_cartridge_mismatch(self):
sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded'])
mcpState = TestHelper.mcpState['boss_science']
mcpState.update({'instrumentNum': [15]})
sopTester.updateModel('mcp', mcpState)
cmdState = self.actorState.gotoField
cmdState.reinitialize(self.cmd)
masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState)
self._check_cmd(0, 14, 0, 0, finish=True, didFail=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_goto_field_apogee_no_guider(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n self._goto_feld_apogee(3, 11, 0, 0, cmdState)",
"def test_goto_field_apogee(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 46, 0, 0, cmdState)",
"def test_goto_field_boss_flat_on_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ff.on'\n self._goto_field_boss(16, 71, 0, 1, cmdState, didFail=True, finish=True)",
"def test_goto_field_boss_guider(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(9, 37, 0, 0, cmdState)",
"def test_goto_field_apogee_no_slew(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)",
"def test_goto_field_boss_ffs_open_fails(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self.cmd.failOn = 'mcp ffs.open'\n self._goto_field_boss(21, 102, 1, 1, cmdState, didFail=True, finish=True)",
"def test_goto_field_apogee_no_slew_decenter_off(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)",
"def test_goto_field_boss_hartmann_blue_fails(self):\n\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('hartmann', TestHelper.hartmannState['blue_fails'])\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n self._goto_field_boss(12, 37, 0, 0, cmdState, didFail=True, finish=True)",
"def test_goto_field_apogee_bypass_gangToCart(self):\n self._prep_bypass('gangToCart', clear=True)\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n self._goto_feld_apogee(13, 44, 4, 0, cmdState)",
"def test_goto_field_boss_calibs(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doGuider = False\n self._goto_field_boss(10, 57, 0, 0, cmdState)",
"def test_goto_field_apogee_no_slew_shutter_open(self):\n sopTester.updateModel('apogee', TestHelper.apogeeState['B_open'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(10, 37, 0, 0, cmdState)",
"def test_defining_only_or_defer_on_nonexistant_fields_fails(self):",
"def test_goto_field_boss_slew(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(3, 26, 0, 0, cmdState)",
"def _validator_target(self, field, value):\n if not REG.match(value):\n self._error(field, \"{} is not a valid target\".format(value))",
"def verifyField(self, pv, field, reference):\n full_pv = pv + \".\" + field\n if (caget(full_pv) != reference):\n msg = \"ERROR: \" + full_pv + \" not equal to \" + str(reference)\n raise Exception(__name__ + msg)\n\n return self.__g.SUCCESS",
"def test_mutate_field(self):\n # Test adding a field\n with self.assertRaises(ValueError):\n self.email.add_field('', '')\n\n self.email.add_field(self.key, self.regex)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertTrue(found_key)\n self.assertEqual(found_regex, self.regex)\n\n # Test getting a field\n with self.assertRaises(LookupError):\n self.email.get_field('')\n\n field = self.email.get_field(self.key)\n self.assertEqual(\n field, {'key': self.key, 'regex': self.regex, 'value': []})\n\n # Test removing a field\n with self.assertRaises(LookupError):\n self.email.remove_field('')\n\n self.email.remove_field(self.key)\n\n found_key = False\n found_regex = r''\n for field in self.email.fields:\n if field['key'] == self.key:\n found_key = True\n found_regex = field['regex']\n\n self.assertFalse(found_key)\n self.assertNotEqual(found_regex, self.regex)",
"def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)",
"def test_invalid_field_access(self):\r\n out = self.c.post(self.url, {'op': 'delete hints', 'field': 'all your private data'})\r\n print out\r\n self.assertTrue('an invalid field was accessed' in out.content)",
"def test_field_id_lt(self):\n field = '<6'\n ref_idx = [0]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)",
"def test_51(self):\n assert 'False' == Api.requestBlock('test-51', CustomFields=True)",
"def test_field_id_gt(self):\n field = '>7'\n ref_idx = [3]\n self.res=self.run_task(infile=self.rawfile,field=field,calmode=self.calmode,outfile=self.outname,outform='ASAP')\n self.assertEqual(self.res,None,\n msg='Any error occurred during calibration')\n self._compare_with_analytic(self.outname, self.line, self.baseline, ref_idx)",
"def test_select_field():",
"def ask_for_field(self, row, col):\n field = self.map.fields[row][col]\n # return the field kind, team, and if there is an entity or not\n return field.passable, field.team, field.entity is not None",
"def checkField(fieldset, text=True):\n if text:\n print \"\\nFieldset contains the following fields:\"\n for i in range(len(fieldset.fields)):\n print fieldset.fields[i].name\n\n ulon = fieldset.U.grid.lon\n ulat = fieldset.U.grid.lat\n udep = fieldset.U.grid.depth\n vlon = fieldset.V.grid.lon\n vlat = fieldset.V.grid.lat\n vdep = fieldset.V.grid.depth\n\n if text:\n if np.all(ulon == vlon):\n print \"longitudes are the same for U and V\"\n else:\n print \"longitudes are not the same for U and V. Note that not all functions will work as intended.\"\n if np.all(ulat == vlat):\n print \"latitudes are the same for U and V\"\n else:\n print \"latitudes are not the same for U and V. Note that not all functions will work as intended.\"\n if np.all(udep == vdep):\n print \"depths are the same for U and V\"\n else:\n print \"depths are not the same for U and V. Note that not all functions will work as intended.\"\n\n return np.all(ulon == vlon) and np.all(ulat == vlat) and np.all(udep == vdep)",
"def check_field_name(field_name):\n\n error_checking.assert_is_string(field_name)\n if field_name in ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES:\n return\n\n error_string = (\n '\\nField \"{0:s}\" is not valid predictor or target variable. Valid '\n 'options listed below:\\n{1:s}'\n ).format(field_name, str(ALL_PREDICTOR_NAMES + ALL_TARGET_NAMES))\n\n raise ValueError(error_string)",
"def _is_ifgoto(self, words):\n if words[0] == 'if-goto':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_IFGOTO command.\".format(self._file_line))\n return True\n else:\n return False",
"def test_02_visit_again(self):",
"def test_get_critical_from_existing_fields(self):\n name = 'generic_field'\n opts = {'names': (name, ), 'alt_field': '', 'computed': False}\n expected_field = self.form.fields.get(name, None)\n actual_name, actual_field = self.form.get_critical_field(opts['names'])\n self.assertEqual(name, actual_name)\n self.assertEqual(expected_field, actual_field)",
"def assert_known_field(self, name):\n if not (name == self.id_field_name or self.is_known_field(name)):\n raise ChipsError(\"Unknown field in model %s [%s]\", self.__class__.__name__, name)",
"def check():"
] | [
"0.67638075",
"0.6552739",
"0.6286361",
"0.6170494",
"0.6125492",
"0.5995649",
"0.59449685",
"0.5715474",
"0.5527284",
"0.5455693",
"0.5430143",
"0.54134613",
"0.530795",
"0.530067",
"0.5265416",
"0.5247163",
"0.52226955",
"0.5134902",
"0.5128405",
"0.51222634",
"0.5118506",
"0.507991",
"0.5075934",
"0.50527847",
"0.5018151",
"0.50139004",
"0.50080955",
"0.5004106",
"0.49996462",
"0.49810272"
] | 0.7532309 | 0 |
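
Each row, like the one above, pairs a query with one positive document and 30 mined negatives scored by similarity, and the metadata declares a (query, document, negatives) triplet objective. A minimal plain-Python sketch (assuming a single row has already been parsed into a dict keyed by the columns in the header; the function name and `k` cutoff are illustrative, not part of the dataset) of turning a row into a training triplet with the hardest negatives first:

```python
def row_to_triplet(row, k=5):
    """Build (query, positive, hard negatives) from one parsed dataset row.

    `row` is assumed to be a dict with the columns shown in the header:
    'query', 'document', 'negatives' and 'negative_scores'.
    """
    # Pair each negative with its score and keep the k highest-scoring
    # (hardest) negatives for a contrastive / triplet objective.
    scored = sorted(
        zip(row["negatives"], map(float, row["negative_scores"])),
        key=lambda pair: pair[1],
        reverse=True,
    )
    hard_negatives = [text for text, _ in scored[:k]]
    return row["query"], row["document"], hard_negatives
```
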
Unwraps the private key into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey object | def unwrap(self):
if self.algorithm == 'rsa':
return self.asn1['private_key'].parsed
if self.algorithm == 'dsa':
params = self.asn1['private_key_algorithm']['parameters']
return DSAPrivateKey({
'version': 0,
'p': params['p'],
'q': params['q'],
'g': params['g'],
'public_key': self.public_key.unwrap(),
'private_key': self.asn1['private_key'].parsed,
})
if self.algorithm == 'ec':
output = self.asn1['private_key'].parsed
output['parameters'] = self.asn1['private_key_algorithm']['parameters']
output['public_key'] = self.public_key.unwrap()
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)",
"def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])",
"def load_private_key(self, private_key):\n if not self.curve:\n self.curve = private_key.curve\n if self.curve != private_key.curve:\n raise InvalidCurveError(\"Curve mismatch.\")\n self.private_key = private_key\n return self.private_key.get_verifying_key()",
"def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())",
"def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')",
"def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()",
"def _createPrivateKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise AssertionError()\r\n if not key.hasPrivateKey():\r\n raise AssertionError()\r\n return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,\r\n key.dQ, key.qInv)",
"def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))",
"def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey",
"def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)",
"def _serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key",
"def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private",
"def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)",
"def private_key(self):\n if self._private_key is not None:\n return self._private_key[0]\n\n spk = self.serialized_private_key\n passphrase = self.passphrase\n\n try:\n self._private_key = [\n serialization.load_pem_private_key(\n self.serialized_private_key,\n backend=default_backend(),\n password=self.passphrase)]\n\n return self._private_key[0]\n\n except:\n raise\n self._private_key = [None]\n return self._private_key[0]",
"def test_private_key_pkey(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n key = crypto.load_privatekey(PEM, priv)\n self.assertEqual(utils.private_key_type(key), c.KEY_RSA)",
"def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private",
"def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)",
"def _get_decryption_key(self, **options):\n\n return self._private_key",
"def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed",
"def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography",
"def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]",
"def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )",
"def load_private_key_bytes(self, private_key):\n if not self.curve:\n raise NoCurveError(\"Curve must be set prior to key load.\")\n return self.load_private_key(\n SigningKey.from_string(private_key, curve=self.curve))",
"def private_key(self):\n return PrivateKey(self._sk.private_bytes(\n encoding=serialization.Encoding.Raw,\n format=serialization.PrivateFormat.Raw,\n encryption_algorithm=serialization.NoEncryption()))",
"def private_key(\n self,\n key: str,\n default: Any = undefined,\n description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n passphrase: Optional[str] = None,\n **kwargs\n ) -> Optional[PrivateKey]:\n cast_key = partial(cast_private_key, key_format=key_format, passphrase=passphrase)\n return self._process(key, description=description, default=default, cast=cast_key,type=PrivateKey, **kwargs)",
"def serializePrivateKey(private_key):\n\treturn private_key.private_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PrivateFormat.PKCS8,\n\t\tencryption_algorithm=serialization.NoEncryption()\n\t)",
"def rsa_private_key(ctx, key_size=\"4096\"):\n rsa_key_size = int(key_size)\n\n key = rsa.generate_private_key(public_exponent=65537, key_size=rsa_key_size, backend=default_backend())\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )",
"def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None",
"def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)",
"def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)"
] | [
"0.7709716",
"0.7091911",
"0.6661885",
"0.6650223",
"0.6593615",
"0.6553231",
"0.64998823",
"0.6498649",
"0.6429571",
"0.64050627",
"0.6381211",
"0.6249152",
"0.6246417",
"0.6217567",
"0.62138116",
"0.6204138",
"0.6198371",
"0.6193991",
"0.6141875",
"0.6141632",
"0.6117683",
"0.6094897",
"0.60794103",
"0.6072148",
"0.6068357",
"0.6056227",
"0.6046553",
"0.6008377",
"0.60045433",
"0.6002406"
] | 0.7861159 | 0 |
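
The `unwrap` method in the row above leans on asn1crypto's lazy parsing: `PrivateKeyInfo['private_key']` is an octet string whose `.parsed` attribute yields the algorithm-specific structure. A minimal sketch of the RSA case, assuming an unencrypted PKCS#8 key in a hypothetical `key.pem` and that asn1crypto is installed:

```python
from asn1crypto import keys, pem

# Read a PEM-encoded, unencrypted PKCS#8 private key (hypothetical path).
with open("key.pem", "rb") as handle:
    data = handle.read()

# pem.unarmor() strips the "-----BEGIN/END PRIVATE KEY-----" armor and
# the base64 layer, leaving raw DER bytes.
if pem.detect(data):
    _, _, data = pem.unarmor(data)

key_info = keys.PrivateKeyInfo.load(data)
if key_info.algorithm == "rsa":
    # Same access pattern as the 'rsa' branch of unwrap() above.
    rsa_key = key_info["private_key"].parsed  # asn1crypto.keys.RSAPrivateKey
    print(rsa_key["modulus"].native.bit_length())
```
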
Unwraps a public key into an asn1crypto.keys.RSAPublicKey, asn1crypto.core.Integer (for DSA) or asn1crypto.keys.ECPointBitString object | def unwrap(self):
if self.algorithm == 'ec':
return self.asn1['public_key']
return self.asn1['public_key'].parsed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def decode_credential_public_key(\n key: bytes,\n) -> Union[DecodedOKPPublicKey, DecodedEC2PublicKey, DecodedRSAPublicKey]:\n # Occassionally we might be given a public key in an \"uncompressed\" format,\n # typically from older U2F security keys. As per the FIDO spec this is indicated by\n # a leading 0x04 \"uncompressed point compression method\" format byte. In that case\n # we need to fill in some blanks to turn it into a full EC2 key for signature\n # verification\n #\n # See https://fidoalliance.org/specs/fido-v2.0-id-20180227/fido-registry-v2.0-id-20180227.html#public-key-representation-formats\n if key[0] == 0x04:\n return DecodedEC2PublicKey(\n kty=COSEKTY.EC2,\n alg=COSEAlgorithmIdentifier.ECDSA_SHA_256,\n crv=COSECRV.P256,\n x=key[1:33],\n y=key[33:65],\n )\n\n decoded_key: dict = decoder.loads(key)\n\n kty = decoded_key[COSEKey.KTY]\n alg = decoded_key[COSEKey.ALG]\n\n if not kty:\n raise InvalidPublicKeyStructure(\"Credential public key missing kty\")\n if not alg:\n raise InvalidPublicKeyStructure(\"Credential public key missing alg\")\n\n if kty == COSEKTY.OKP:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"OKP credential public key missing x\")\n\n return DecodedOKPPublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n )\n elif kty == COSEKTY.EC2:\n crv = decoded_key[COSEKey.CRV]\n x = decoded_key[COSEKey.X]\n y = decoded_key[COSEKey.Y]\n\n if not crv:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing crv\")\n if not x:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing x\")\n if not y:\n raise InvalidPublicKeyStructure(\"EC2 credential public key missing y\")\n\n return DecodedEC2PublicKey(\n kty=kty,\n alg=alg,\n crv=crv,\n x=x,\n y=y,\n )\n elif kty == COSEKTY.RSA:\n n = decoded_key[COSEKey.N]\n e = decoded_key[COSEKey.E]\n\n if not n:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing n\")\n if not e:\n raise InvalidPublicKeyStructure(\"RSA credential public key missing e\")\n\n return DecodedRSAPublicKey(\n kty=kty,\n alg=alg,\n n=n,\n e=e,\n )\n\n raise UnsupportedPublicKeyType(f'Unsupported credential public key type \"{kty}\"')",
"def get_pub_rsa_key(pub_key):\n return RSA.importKey(pub_key)",
"def load_pub_key_bytes(bs: bytes) -> rsa.RSAPublicKey:\n k = serialization.load_pem_public_key(bs)\n assert isinstance(k, rsa.RSAPublicKey)\n return k",
"def _wrap_publickey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PublicKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize public key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_public_key(der, backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])",
"def parse_public(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, algo, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a public key or\n certificate, but rather a private key\n '''\n ))\n\n # When a public key returning from _unarmor_pem has a known algorithm\n # of RSA, that means the DER structure is of the type RSAPublicKey, so\n # we need to wrap it in the PublicKeyInfo structure.\n if algo == 'rsa':\n return PublicKeyInfo.wrap(data, 'rsa')\n\n if key_type is None or key_type == 'public key':\n try:\n pki = PublicKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PublicKeyInfo\n\n try:\n rpk = RSAPublicKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n rpk.native\n return PublicKeyInfo.wrap(rpk, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPublicKey\n\n if key_type is None or key_type == 'certificate':\n try:\n parsed_cert = Certificate.load(data)\n key_info = parsed_cert['tbs_certificate']['subject_public_key_info']\n return key_info\n except (ValueError):\n pass # Data was not a cert\n\n raise ValueError('The data specified does not appear to be a known public key or certificate format')",
"def rsa_public_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PublicKeyInfo())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid public key encoding.\")\n\n return decoded_key[\"publicKey\"].asOctets()",
"def serializePublicKey(public_key):\n\treturn public_key.public_bytes(\n\t\tencoding=serialization.Encoding.PEM,\n\t\tformat=serialization.PublicFormat.SubjectPublicKeyInfo\n\t)",
"def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)",
"def decode_public_key(as_bytes: typing.List[int]) -> PublicKey:\n raise NotImplementedError()",
"def public_key_from_private_key(privkey: bytes, compressed: bool) -> bytes:\n key = ECKey(privkey)\n return key.get_public_key(compressed)",
"def generate_rsa_public_key(private_key: RSAPrivateKeyWithSerialization) -> RSAPublicKey:\n return private_key.public_key()",
"def extractPublicKey(cert):\n pk = cert.get_pubkey()\n\n b = _util.binding\n l = b.lib\n ffi = b.ffi\n rsa = l.EVP_PKEY_get1_RSA(pk._pkey)\n buf = ffi.new(\"unsigned char **\")\n length = l.i2d_RSA_PUBKEY(rsa, buf)\n pk = ffi.buffer(buf[0], length)[:]\n ffi.gc(buf[0], l.OPENSSL_free)\n return pk",
"def get_pub_key(priv_key: rsa.RSAPrivateKey) -> rsa.RSAPublicKey:\n return priv_key.public_key()",
"def _get_keyidv2(pubkey: SupportedKeyTypes) -> int:\n if isinstance(pubkey, RSAPublicKey):\n fmt = serialization.PublicFormat.PKCS1\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.DER, format=fmt)\n elif isinstance(pubkey, EllipticCurvePublicKey):\n fmt = serialization.PublicFormat.UncompressedPoint\n pubbytes = pubkey.public_bytes(encoding=serialization.Encoding.X962, format=fmt)\n else:\n raise UnsupportedAlgorithm(f\"Unsupported public key type {type(pubkey)}\")\n\n default_be = backends.default_backend()\n digest = hashes.Hash(hashes.SHA1(), backend=default_be)\n digest.update(pubbytes)\n keydigest = digest.finalize()\n return int.from_bytes(keydigest[16:], \"big\")",
"def Read(key):\n rsa = json.loads(key)\n params = {\n 'modulus': util.Base64WSDecode(rsa['modulus']),\n 'publicExponent': util.Base64WSDecode(rsa['publicExponent'])\n }\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])",
"def _get_pubkey_from_pem_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_pem_public_key(filedata, backend=backend), None\n except Exception:\n return None, None",
"def _createPublicKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise AssertionError()\r\n return _createPublicRSAKey(key.n, key.e)",
"def Read(key):\n rsa = json.loads(key)\n params = {'modulus' : util.Decode(rsa['modulus']),\n 'publicExponent' : util.Decode(rsa['publicExponent'])}\n\n pubkey = RSA.construct((util.BytesToLong(params['modulus']),\n util.BytesToLong(params['publicExponent'])))\n return RsaPublicKey(params, pubkey, rsa['size'])",
"def rsa_public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: RSA public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|rsapublic'\"\n )\n\n public_key(ctx)",
"def _get_pubkey_from_der_public_key(filedata: bytes, backend: Any) -> Tuple[Any, None]:\n try:\n return serialization.load_der_public_key(filedata, backend=backend), None\n except Exception:\n return None, None",
"def public_key(ctx):\n if not ctx.data:\n raise RefError(\n \"Ref error: eval_func: public key cannot be derived; try \"\n \"something like '|reveal:path/to/encrypted_private_key|publickey'\"\n )\n\n data_dec = ctx.data\n if ctx.ref_encoding == \"base64\":\n data_dec = base64.b64decode(data_dec).decode()\n\n private_key = serialization.load_pem_private_key(\n data_dec.encode(), password=None, backend=default_backend()\n )\n public_key = private_key.public_key()\n\n ctx.data = str(\n public_key.public_bytes(\n encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo\n ),\n \"UTF-8\",\n )",
"def rsa_publickey_to_string(publickey):\r\n if not rsa_is_valid_publickey(publickey):\r\n raise ValueError, \"Invalid public key\"\r\n\r\n return str(publickey['e'])+\" \"+str(publickey['n'])",
"def deserializePublicKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_public_key(string , backend = bc)",
"def rsa_string_to_publickey(mystr):\r\n if len(mystr.split()) != 2:\r\n raise ValueError, \"Invalid public key string\"\r\n \r\n return {'e':long(mystr.split()[0]), 'n':long(mystr.split()[1])}",
"def validate_handshake_public_key(cls, public_key: bytes) -> None:\n ...",
"def export_public_key(self, public_key):\n error = vscf_error_t()\n result = self._lib_vscf_ecc.vscf_ecc_export_public_key(self.ctx, public_key.c_impl, error)\n VscfStatus.handle_status(error.status)\n instance = RawPublicKey.take_c_ctx(result)\n return instance",
"def test_public_key_rsa(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_RSA)",
"def solve(key_data: bytes) -> PublicKey:\n return { # type: ignore\n Encoding.PEM: load_pem_public_key,\n Encoding.DER: load_der_public_key\n }[real_encoding](key_data, default_backend())",
"def rsa_file_to_publickey(filename):\r\n fileobject = file(filename,'r')\r\n publickeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_publickey(publickeystring)",
"def get_public_key_in_pem(self):\n serialized_public = self.public_key_obj.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo\n )\n return serialized_public"
] | [
"0.70528996",
"0.682243",
"0.6711425",
"0.67085093",
"0.6610189",
"0.65048105",
"0.6489869",
"0.6488379",
"0.64881927",
"0.64246345",
"0.6423231",
"0.64138883",
"0.6409382",
"0.6395284",
"0.63761365",
"0.6347127",
"0.6329381",
"0.6321539",
"0.62870216",
"0.6245045",
"0.6196625",
"0.6152874",
"0.61456263",
"0.6134476",
"0.61180043",
"0.61159605",
"0.6099881",
"0.6069098",
"0.6031535",
"0.60043114"
] | 0.71075326 | 0 |
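
The same pattern applies on the public side: for EC keys the bit string is the key material itself (an ECPointBitString), while for RSA and DSA it parses into the inner structure (RSAPublicKey or Integer). A minimal sketch, assuming a SubjectPublicKeyInfo PEM in a hypothetical `pub.pem`:

```python
from asn1crypto import keys, pem

with open("pub.pem", "rb") as handle:  # hypothetical path
    data = handle.read()

if pem.detect(data):
    _, _, data = pem.unarmor(data)

pub_info = keys.PublicKeyInfo.load(data)
if pub_info.algorithm == "ec":
    # EC public keys are just the encoded point; no further parsing needed.
    point = pub_info["public_key"]
else:
    # RSA / DSA: parse the bit string into RSAPublicKey / Integer.
    inner = pub_info["public_key"].parsed
```
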
Unwraps an asn1crypto.keys.PrivateKeyInfo object into an asn1crypto.keys.RSAPrivateKey, asn1crypto.keys.DSAPrivateKey or asn1crypto.keys.ECPrivateKey. | def _unwrap_private_key_info(key_info):
key_alg = key_info.algorithm
if key_alg == 'rsa' or key_alg == 'rsassa_pss':
return key_info['private_key'].parsed
if key_alg == 'dsa':
params = key_info['private_key_algorithm']['parameters']
parsed = key_info['private_key'].parsed
return DSAPrivateKey({
'version': 0,
'p': params['p'],
'q': params['q'],
'g': params['g'],
'public_key': Integer(pow(
params['g'].native,
parsed.native,
params['p'].native
)),
'private_key': parsed,
})
if key_alg == 'ec':
parsed = key_info['private_key'].parsed
parsed['parameters'] = key_info['private_key_algorithm']['parameters']
return parsed
raise ValueError('Unsupported key_info.algorithm "%s"' % key_info.algorithm) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output",
"def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))",
"def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)",
"def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())",
"def _unarmor_pem(data, password=None):\n\n object_type, headers, der_bytes = unarmor(data)\n\n type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'\n armor_type = re.match(type_regex, object_type)\n if not armor_type:\n raise ValueError(pretty_message(\n '''\n data does not seem to contain a PEM-encoded certificate, private\n key or public key\n '''\n ))\n\n pem_header = armor_type.group(1)\n\n data = data.strip()\n\n # RSA private keys are encrypted after being DER-encoded, but before base64\n # encoding, so they need to be handled specially\n if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):\n algo = armor_type.group(2).lower()\n return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))\n\n key_type = pem_header.lower()\n algo = None\n if key_type == 'encrypted private key':\n key_type = 'private key'\n elif key_type == 'rsa public key':\n key_type = 'public key'\n algo = 'rsa'\n\n return (key_type, algo, der_bytes)",
"def _wrap_privatekey(self) -> None:\n p_der = ffi.new(\"unsigned char **\")\n der_len = lib.i2d_PrivateKey(self._pkey, p_der)\n if der_len < 0:\n raise InvalidPKeyError(\"Could not serialize private key\")\n try:\n der = ffi.buffer(p_der[0], der_len)[:]\n try:\n self._key = load_der_private_key(der, password=None,\n backend=default_backend())\n except ValueError as exc:\n raise InvalidPKeyError from exc\n finally:\n lib.OPENSSL_free(p_der[0])",
"def _createPrivateKey(key):\r\n if not isinstance(key, RSAKey):\r\n raise AssertionError()\r\n if not key.hasPrivateKey():\r\n raise AssertionError()\r\n return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,\r\n key.dQ, key.qInv)",
"def rsa_private_key_pkcs8_to_pkcs1(pkcs8_key):\n decoded_values = decoder.decode(pkcs8_key, asn1Spec=PKCS8PrivateKey())\n\n try:\n decoded_key = decoded_values[0]\n except IndexError:\n raise ValueError(\"Invalid private key encoding\")\n\n return decoded_key[\"privateKey\"]",
"def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')",
"def _get_decryption_key(self, **options):\n\n return self._private_key",
"def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed",
"def convert_key_info_to_readable(key_info: dict[str, Any]) -> dict[str, Any]:\n key_fields = {'kid': 'key_id',\n 'kty': 'json_web_key_type',\n 'key_ops': 'key_operations',\n 'n': 'RSA_modulus',\n 'e': 'RSA_public_components',\n }\n for key, value in key_fields.items():\n if key in key_info:\n key_info[value] = key_info.pop(key)\n\n return key_info",
"def text2PrivateKey(text:str):\n return RSA.importKey(b58decode(text))",
"def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)",
"def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )",
"def mk_keyobj_from_private_key(self, privkey):\n bn = BACKEND_KP.private_key_obj._backend._ffi.NULL\n bn_ptr = BACKEND_KP.private_key_obj._backend._lib.BN_bin2bn(privkey, len(privkey), bn)\n secret_val = BACKEND_KP.private_key_obj._backend._bn_to_int(bn_ptr)\n\n if self.curvetype == KeyType.ECDSA_P256v1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256R1(), default_backend())\n elif self.curvetype == KeyType.ECDSA_SECP256k1:\n self.private_key_obj = ec.derive_private_key(secret_val, ec.SECP256K1(), default_backend())\n self._get_naive_private_key_bytes()\n self.public_key_obj = self.private_key_obj.public_key()\n self._get_naive_public_key_bytes()",
"def get_private_key_in_der(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.DER,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private",
"def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()",
"def _get_private_key(self, privkey=None):\n\n # read private keys from keyring\n privkeys = self.gpg.list_keys(True) # True => private keys\n if len(privkeys) > 0 and privkeys[-1].has_key('fingerprint'):\n fingerprints = []\n for k in privkeys:\n fingerprints.append(k['fingerprint'])\n else:\n # no private key in keyring\n return None\n\n if privkey:\n # check for existence of private key received as argument\n # DEVEL: check for expiration as well\n if len(privkey) > 7 and len(privkey) <= 40:\n for fp in fingerprints:\n if fp.endswith(privkey):\n # work with last 16 significant chars internally,\n # even if only 8 are required in trac.ini\n privkey = fp[-16:]\n break\n # no fingerprint matching key ID\n else:\n privkey = None\n else:\n # reset invalid key ID\n privkey = None\n else:\n # select (last) private key from keyring\n privkey = fingerprints[-1][-16:]\n\n return privkey",
"def get_key_from_keyring(self):\n private_key = keyring.get_password(self.keyring_service_name, \"private_key\")\n\n if private_key is not None:\n return base64.b64decode(private_key)\n else:\n return None",
"def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private",
"def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return data\n\n if enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)",
"def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")",
"def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)",
"def test_private_key_rsa(self):\n priv = \"\"\"-----BEGIN PRIVATE KEY-----\nMIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEAybxDeYLbbriv2wJ2\nd0w09xGJdi7dIzgPtI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5\nM62FDwIDAQABAkB/ayvrKd3TV0+rsyiEPVwO2cLLJNqEDjrNPm2w21K71WMVkngm\nOH0DpFePpPHQf+EdUfpRwZNdXhyt52MxC4GxAiEA8FBZd1uqZ1PGrkety7EGgEJk\nBTrtu/WVLbGhbloNvr0CIQDW50RfhAmFJPh6bo4nKE/qtz5O0BVsoFQA8l7uB+eF\nuwIgC57HBLeBAOgTJmA+7ieMOe176qjT0A/q+7+oH67pFT0CIQDInpuAw6WTi2EA\nAsdoHMUGbEyZjL4Da2UggSNH+U8U0wIgR1ZLchEpsHafverbte2qHey/BSHyKEQi\ncCn1I7EnAH8=\n-----END PRIVATE KEY-----\"\"\"\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIIBjTCCATegAwIBAgIJAMLQYSpm+vm9MA0GCSqGSIb3DQEBCwUAMCIxEDAOBgNV\nBAMMB1JTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMB4XDTE4MDUyNzEwMjAzOFoXDTE4\nMDYyNjEwMjAzOFowIjEQMA4GA1UEAwwHUlNBIDUxMjEOMAwGA1UECgwFV2ViQ0Ew\nXDANBgkqhkiG9w0BAQEFAANLADBIAkEAybxDeYLbbriv2wJ2d0w09xGJdi7dIzgP\ntI6beSKkk3ILXRqj59ufj/i7RXg7RASOzZH/wmfvbBNsI5y5M62FDwIDAQABo1Aw\nTjAdBgNVHQ4EFgQUkaOkLIQe2hh8dGQFm+iSY/hjQucwHwYDVR0jBBgwFoAUkaOk\nLIQe2hh8dGQFm+iSY/hjQucwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAANB\nAL89kRbdtpdFo+nKxRWc6Dx72jbEX3nNBsxjVIHbm8RjFQ9ASwr6szqJjmROCXcF\nIJrZsa9U1KVUZBvzrhZrOCE=\n-----END CERTIFICATE-----\"\"\"\n pkcs12 = _create_pkcs12(priv, cert)\n self.assertEqual(utils.private_key_type(pkcs12), c.KEY_RSA)",
"def deserializePrivateKey(string, bc = backend):\n\tif type(string) == str:\n\t\tstring = string.encode('utf8')\n\treturn serialization.load_pem_private_key(string, password = None , backend = bc)",
"def rsa_file_to_privatekey(filename):\r\n fileobject = file(filename,'r')\r\n privatekeystring = fileobject.read()\r\n fileobject.close()\r\n\r\n return rsa_string_to_privatekey(privatekeystring)",
"def _serialize_private_key(private_key, password=None):\n error = None\n pvt_key_loaders = [\n load_pem_private_key, load_der_private_key\n ]\n pvt_key = None\n for loader in pvt_key_loaders:\n if not pvt_key:\n try:\n pvt_key = loader(\n private_key.encode('utf-8'),\n password=password,\n backend=default_backend()\n )\n error = False\n break\n except (ValueError, UnsupportedAlgorithm) as err:\n error = err\n if error:\n raise errors.InvalidPrivateKeyError(error)\n else:\n return pvt_key",
"def rsa_private_key(ctx, key_size=\"4096\"):\n rsa_key_size = int(key_size)\n\n key = rsa.generate_private_key(public_exponent=65537, key_size=rsa_key_size, backend=default_backend())\n\n ctx.data = str(\n key.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.PKCS8,\n encryption_algorithm=serialization.NoEncryption(),\n ),\n \"utf-8\",\n )",
"def get_principal_pkey( user_email, observer_secret ):\n \n sp = get_principal_data( user_email )\n if sp is None:\n logger.error(\"Failed to find private key for principal %s\" % user_email )\n return None \n \n public_key_pem = sp.public_key_pem\n sealed_private_key_pem = sp.sealed_private_key\n\n # unseal\n private_key_pem = verify_and_unseal_blob(public_key_pem, observer_secret, sealed_private_key_pem)\n if private_key_pem is None:\n logger.error(\"Failed to unseal private key\")\n\n return private_key_pem"
] | [
"0.72001606",
"0.61105424",
"0.59807235",
"0.59552276",
"0.5781235",
"0.5733041",
"0.5708165",
"0.56828797",
"0.5617318",
"0.56119716",
"0.55409443",
"0.55051327",
"0.54653615",
"0.54501706",
"0.5382498",
"0.53593355",
"0.53350115",
"0.5326151",
"0.5310217",
"0.5288465",
"0.52867794",
"0.5273756",
"0.526497",
"0.52596426",
"0.52040935",
"0.51865596",
"0.5159534",
"0.51291484",
"0.51176167",
"0.5079574"
] | 0.82866263 | 0 |
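
A PKCS#8 DSA key stores only the private exponent, so `_unwrap_private_key_info` above reconstructs the public value as y = g^x mod p with Python's three-argument `pow()`. A tiny illustration with toy, insecure parameters chosen only to show the arithmetic:

```python
# Toy DSA-style domain parameters (far too small to be secure):
# q divides p - 1 and g has order q modulo p.
p, q, g = 23, 11, 4
x = 7                 # private key
y = pow(g, x, p)      # public key, the same computation used above
assert y == (g ** x) % p
print(y)              # 8
```
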
Removes PEMencoding from a public key, private key or certificate. If the private key is encrypted, the password will be used to decrypt it. | def _unarmor_pem(data, password=None):
object_type, headers, der_bytes = unarmor(data)
type_regex = '^((DSA|EC|RSA) PRIVATE KEY|ENCRYPTED PRIVATE KEY|PRIVATE KEY|PUBLIC KEY|RSA PUBLIC KEY|CERTIFICATE)'
armor_type = re.match(type_regex, object_type)
if not armor_type:
raise ValueError(pretty_message(
'''
data does not seem to contain a PEM-encoded certificate, private
key or public key
'''
))
pem_header = armor_type.group(1)
data = data.strip()
# RSA private keys are encrypted after being DER-encoded, but before base64
# encoding, so they need to be handled specially
if pem_header in set(['RSA PRIVATE KEY', 'DSA PRIVATE KEY', 'EC PRIVATE KEY']):
algo = armor_type.group(2).lower()
return ('private key', algo, _unarmor_pem_openssl_private(headers, der_bytes, password))
key_type = pem_header.lower()
algo = None
if key_type == 'encrypted private key':
key_type = 'private key'
elif key_type == 'rsa public key':
key_type = 'public key'
algo = 'rsa'
return (key_type, algo, der_bytes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def key_to_pem(key, password=None):\n if password:\n enc = BestAvailableEncryption(as_bytes(password))\n else:\n enc = NoEncryption()\n return key.private_bytes(Encoding.PEM, PrivateFormat.PKCS8, enc)",
"def strip_begin_end_public_key(key):\n return key.replace(\"\\n\", \"\")\\\n .replace(\"-----BEGIN PUBLIC KEY-----\", \"\").replace(\n \"-----END PUBLIC KEY-----\", \"\")",
"def _unarmor_pem_openssl_private(headers, data, password):\n\n enc_algo = None\n enc_iv_hex = None\n enc_iv = None\n\n if 'DEK-Info' in headers:\n params = headers['DEK-Info']\n if params.find(',') != -1:\n enc_algo, enc_iv_hex = params.strip().split(',')\n else:\n enc_algo = 'RC4'\n\n if not enc_algo:\n return data\n\n if enc_iv_hex:\n enc_iv = binascii.unhexlify(enc_iv_hex.encode('ascii'))\n enc_algo = enc_algo.lower()\n\n enc_key_length = {\n 'aes-128-cbc': 16,\n 'aes-128': 16,\n 'aes-192-cbc': 24,\n 'aes-192': 24,\n 'aes-256-cbc': 32,\n 'aes-256': 32,\n 'rc4': 16,\n 'rc4-64': 8,\n 'rc4-40': 5,\n 'rc2-64-cbc': 8,\n 'rc2-40-cbc': 5,\n 'rc2-cbc': 16,\n 'rc2': 16,\n 'des-ede3-cbc': 24,\n 'des-ede3': 24,\n 'des3': 24,\n 'des-ede-cbc': 16,\n 'des-cbc': 8,\n 'des': 8,\n }[enc_algo]\n\n enc_key = hashlib.md5(password + enc_iv[0:8]).digest()\n while enc_key_length > len(enc_key):\n enc_key += hashlib.md5(enc_key + password + enc_iv[0:8]).digest()\n enc_key = enc_key[0:enc_key_length]\n\n enc_algo_name = {\n 'aes-128-cbc': 'aes',\n 'aes-128': 'aes',\n 'aes-192-cbc': 'aes',\n 'aes-192': 'aes',\n 'aes-256-cbc': 'aes',\n 'aes-256': 'aes',\n 'rc4': 'rc4',\n 'rc4-64': 'rc4',\n 'rc4-40': 'rc4',\n 'rc2-64-cbc': 'rc2',\n 'rc2-40-cbc': 'rc2',\n 'rc2-cbc': 'rc2',\n 'rc2': 'rc2',\n 'des-ede3-cbc': 'tripledes',\n 'des-ede3': 'tripledes',\n 'des3': 'tripledes',\n 'des-ede-cbc': 'tripledes',\n 'des-cbc': 'des',\n 'des': 'des',\n }[enc_algo]\n decrypt_func = crypto_funcs[enc_algo_name]\n\n if enc_algo_name == 'rc4':\n return decrypt_func(enc_key, data)\n\n return decrypt_func(enc_key, data, enc_iv)",
"def unwrap(self):\n\n if self.algorithm == 'rsa':\n return self.asn1['private_key'].parsed\n\n if self.algorithm == 'dsa':\n params = self.asn1['private_key_algorithm']['parameters']\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': self.public_key.unwrap(),\n 'private_key': self.asn1['private_key'].parsed,\n })\n\n if self.algorithm == 'ec':\n output = self.asn1['private_key'].parsed\n output['parameters'] = self.asn1['private_key_algorithm']['parameters']\n output['public_key'] = self.public_key.unwrap()\n return output",
"def validate_privatekey_pem(key_pem):\n assert isinstance(key_pem, str)\n\n private_key_cryptography = serialization.load_pem_private_key(\n data=key_pem.encode('ascii'),\n password=None,\n backend=cryptography_default_backend\n )\n\n if not isinstance(private_key_cryptography, rsa.RSAPrivateKey):\n sys.exit('Unexpected private key type')\n\n return private_key_cryptography",
"def test_set_private_key_setter_encrypted_pem_str_password(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(\n self.encrypted_pem_private_key, password=self.private_key_password.decode()\n )\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)",
"def test_set_private_key_setter_encrypted_pem(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(\n self.encrypted_pem_private_key, password=self.private_key_password\n )\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)",
"def dePem(s, name):\r\n prefix = \"-----BEGIN %s-----\" % name\r\n postfix = \"-----END %s-----\" % name \r\n start = s.find(prefix)\r\n if start == -1:\r\n raise SyntaxError(\"Missing PEM prefix\")\r\n end = s.find(postfix, start+len(prefix))\r\n if end == -1:\r\n raise SyntaxError(\"Missing PEM postfix\")\r\n s = s[start+len(\"-----BEGIN %s-----\" % name) : end]\r\n retBytes = a2b_base64(s) # May raise SyntaxError\r\n return retBytes",
"def get_private_key_in_pem(self):\n serialized_private = self.private_key_obj.private_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PrivateFormat.TraditionalOpenSSL,\n encryption_algorithm=serialization.NoEncryption()\n )\n return serialized_private",
"def test_set_private_key_setter_pem_str(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)",
"def convert_key_to_pem ( key_filename, output_filename ) :\n cmd = 'openssl rsa -in ' + key_filename + ' -outform PEM -out ' + output_filename\n return subprocess.call( cmd, shell = True )",
"def get_verifying_key(private_key):\n return private_key.get_verifying_key().to_pem().decode('ascii')",
"def passwd_decryption(self):\n with open(self.key_path, 'rb') as input_key:\n for line in input_key:\n key = line\n with open(self.pass_path, 'rb') as input_password:\n for line in input_password:\n password = line\n cipher_suit = Fernet(key)\n plain_password = cipher_suit.decrypt(password)\n plain_password = bytes(plain_password).decode('utf-8')\n \n return plain_password",
"def pfx2pem(input_file, output_file, passphrase=None):\n pfx = open(input_file, 'rb').read()\n p12 = crypto.load_pkcs12(pfx, passphrase)\n pem = crypto.dump_certificate(crypto.FILETYPE_PEM, p12.get_certificate())\n pem += crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())\n open(output_file, 'wb').write(pem)",
"def _unwrap_private_key_info(key_info):\n\n key_alg = key_info.algorithm\n\n if key_alg == 'rsa' or key_alg == 'rsassa_pss':\n return key_info['private_key'].parsed\n\n if key_alg == 'dsa':\n params = key_info['private_key_algorithm']['parameters']\n parsed = key_info['private_key'].parsed\n return DSAPrivateKey({\n 'version': 0,\n 'p': params['p'],\n 'q': params['q'],\n 'g': params['g'],\n 'public_key': Integer(pow(\n params['g'].native,\n parsed.native,\n params['p'].native\n )),\n 'private_key': parsed,\n })\n\n if key_alg == 'ec':\n parsed = key_info['private_key'].parsed\n parsed['parameters'] = key_info['private_key_algorithm']['parameters']\n return parsed\n\n raise ValueError('Unsupported key_info.algorithm \"%s\"' % key_info.algorithm)",
"def ec_private_pem_to_private_bin(pem):\n return \"\".join(pem.split(\"\\n\")[1:-2]).decode(\"BASE64\")",
"def pem_armor_certificate(certificate):\n\n return asymmetric.dump_certificate(certificate)",
"def verify_and_unseal_blob( public_key_pem, secret, blob_data ):\n\n # verify it \n rc, sealed_data = syndicate_crypto.verify_and_parse_json( public_key_pem, blob_data )\n if rc != 0:\n logger.error(\"Failed to verify and parse blob, rc = %s\" % rc)\n return None\n\n logger.info(\"Unsealing credential data\")\n\n rc, data = c_syndicate.password_unseal( sealed_data, secret )\n if rc != 0:\n logger.error(\"Failed to unseal blob, rc = %s\" % rc )\n return None\n\n return data",
"def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()",
"def load_pem_x509_certificate(data):\n return _x509.load_pem_x509_certificate(data, _backends.default_backend())",
"def extract_ca_private_key_bytes_from_pem(pem_content):\n found_marker = False\n for begin_marker in [constants.BEGIN_PRIVATE_KEY_MARKER,\n constants.BEGIN_RSA_PRIVATE_KEY_MARKER]:\n begin_search = pem_content.find(begin_marker)\n if begin_search >= 0:\n found_marker = True\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n found_marker = False\n for end_marker in [constants.END_PRIVATE_KEY_MARKER,\n constants.END_RSA_PRIVATE_KEY_MARKER]:\n end_search = pem_content.find(end_marker)\n if end_search >= 0:\n found_marker = True\n end_search += len(end_marker)\n break\n\n if not found_marker:\n raise exception.InvalidKubernetesCA\n\n base64_key = base64.encode_as_text(pem_content[begin_search:end_search])\n return base64_key",
"def parse_private(data, password=None):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data, password)\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a public key\n '''\n ))\n\n if key_type == 'certificate':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a private key, but\n rather a certificate\n '''\n ))\n\n try:\n pki = PrivateKeyInfo.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not PrivateKeyInfo\n\n try:\n parsed_wrapper = EncryptedPrivateKeyInfo.load(data)\n encryption_algorithm_info = parsed_wrapper['encryption_algorithm']\n encrypted_data = parsed_wrapper['encrypted_data'].native\n decrypted_data = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_data, password)\n pki = PrivateKeyInfo.load(decrypted_data)\n # Call .native to fully parse since asn1crypto is lazy\n pki.native\n return pki\n except (ValueError):\n pass # Data was not EncryptedPrivateKeyInfo\n\n try:\n parsed = RSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'rsa')\n except (ValueError):\n pass # Data was not an RSAPrivateKey\n\n try:\n parsed = DSAPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'dsa')\n except (ValueError):\n pass # Data was not a DSAPrivateKey\n\n try:\n parsed = ECPrivateKey.load(data)\n # Call .native to fully parse since asn1crypto is lazy\n parsed.native\n return PrivateKeyInfo.wrap(parsed, 'ec')\n except (ValueError):\n pass # Data was not an ECPrivateKey\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known private key format\n '''\n ))",
"def solve(key_data: bytes) -> PrivateKey:\n return { # type: ignore\n Encoding.PEM: load_pem_private_key,\n Encoding.DER: load_der_private_key\n }[real_encoding](key_data, password, default_backend())",
"def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")",
"def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)",
"def base64_to_pem(crypto_type, b64_text, width=76):\n lines = ''\n for pos in xrange(0, len(b64_text), width):\n lines += b64_text[pos:pos + width] + '\\n'\n\n return '-----BEGIN %s-----\\n%s-----END %s-----' % (crypto_type, lines, crypto_type)",
"def parse_certificate(data):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n key_type = None\n\n # Appears to be PEM formatted\n if re.match(b'\\\\s*-----', data) is not None:\n key_type, _, data = _unarmor_pem(data)\n\n if key_type == 'private key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a private key\n '''\n ))\n\n if key_type == 'public key':\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a certificate, but\n rather a public key\n '''\n ))\n\n if key_type is None or key_type == 'certificate':\n try:\n return Certificate.load(data)\n except (ValueError):\n pass # Data was not a Certificate\n\n raise ValueError(pretty_message(\n '''\n The data specified does not appear to be a known certificate format\n '''\n ))",
"def decrypt_using_private_key(message):\n public_key_path = os.path.join('keys', 'private.key')\n with open(public_key_path, 'rb') as file:\n private_key = RSA.importKey(file.read())\n\n cipher = PKCS1_OAEP.new(private_key)\n encrypted = cipher.decrypt(message)\n return encrypted.hex()",
"def _unarmor(pem_bytes):\n\n if not isinstance(pem_bytes, byte_cls):\n raise TypeError(unwrap(\n '''\n pem_bytes must be a byte string, not %s\n ''',\n _type_name(pem_bytes)\n ))\n\n # Valid states include: \"trash\", \"headers\", \"body\"\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n\n found_start = False\n found_end = False\n\n for line in pem_bytes.splitlines(False):\n if line == b'':\n continue\n\n if state == \"trash\":\n # Look for a starting line since some CA cert bundle show the cert\n # into in a parsed format above each PEM block\n type_name_match = re.match(b'^(?:---- |-----)BEGIN ([A-Z0-9 ]+)(?: ----|-----)', line)\n if not type_name_match:\n continue\n object_type = type_name_match.group(1).decode('ascii')\n\n found_start = True\n state = 'headers'\n continue\n\n if state == 'headers':\n if line.find(b':') == -1:\n state = 'body'\n else:\n decoded_line = line.decode('ascii')\n name, value = decoded_line.split(':', 1)\n headers[name] = value.strip()\n continue\n\n if state == 'body':\n if line[0:5] in (b'-----', b'---- '):\n der_bytes = base64.b64decode(base64_data)\n\n yield (object_type, headers, der_bytes)\n\n state = 'trash'\n headers = {}\n base64_data = b''\n object_type = None\n found_end = True\n continue\n\n base64_data += line\n\n if not found_start or not found_end:\n raise ValueError(unwrap(\n '''\n pem_bytes does not appear to contain PEM-encoded data - no\n BEGIN/END combination found\n '''\n ))",
"def test_use_privatekey_file_unicode(self, tmpfile):\n self._use_privatekey_file_test(\n tmpfile.decode(getfilesystemencoding()) + NON_ASCII,\n FILETYPE_PEM,\n )"
] | [
"0.5717775",
"0.5633551",
"0.55747586",
"0.55693495",
"0.55062234",
"0.5487441",
"0.544285",
"0.5425336",
"0.5414394",
"0.5376748",
"0.53717756",
"0.53678894",
"0.53630817",
"0.53443956",
"0.5333764",
"0.53271896",
"0.532627",
"0.5300595",
"0.5287096",
"0.5276774",
"0.52395916",
"0.5225118",
"0.51815087",
"0.5176023",
"0.51483715",
"0.51152956",
"0.5108976",
"0.51032734",
"0.50908834",
"0.5081816"
] | 0.5872964 | 0 |
Parses a PKCS12 ASN.1 DER-encoded structure and extracts certs and keys | def _parse_pkcs12(data, password, load_private_key):
if not isinstance(data, byte_cls):
raise TypeError(pretty_message(
'''
data must be a byte string, not %s
''',
type_name(data)
))
if password is not None:
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
else:
password = b''
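    # Certificates and private keys found in the file, keyed by a public-key
    # fingerprint so they can be paired up further below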
certs = {}
private_keys = {}
pfx = Pfx.load(data)
auth_safe = pfx['auth_safe']
if auth_safe['content_type'].native != 'data':
raise ValueError(pretty_message(
'''
Only password-protected PKCS12 files are currently supported
'''
))
authenticated_safe = pfx.authenticated_safe
mac_data = pfx['mac_data']
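    # If MacData is present, verify the HMAC over the authenticated safe
    # before parsing any of its contents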
if mac_data:
mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native
key_length = {
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64,
'sha512_224': 28,
'sha512_256': 32,
}[mac_algo]
mac_key = pkcs12_kdf(
mac_algo,
password,
mac_data['mac_salt'].native,
mac_data['iterations'].native,
key_length,
3 # ID 3 is for generating an HMAC key
)
hash_mod = getattr(hashlib, mac_algo)
computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()
stored_hmac = mac_data['mac']['digest'].native
if not constant_compare(computed_hmac, stored_hmac):
raise ValueError('Password provided is invalid')
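    # Walk the authenticated safe; each ContentInfo is either plain data or
    # password-encrypted data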
for content_info in authenticated_safe:
content = content_info['content']
if isinstance(content, OctetString):
_parse_safe_contents(content.native, certs, private_keys, password, load_private_key)
elif isinstance(content, EncryptedData):
encrypted_content_info = content['encrypted_content_info']
encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']
encrypted_content = encrypted_content_info['encrypted_content'].native
decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)
_parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)
else:
raise ValueError(pretty_message(
'''
Public-key-based PKCS12 files are not currently supported
'''
))
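    # Pair private keys with their certificates by matching public-key fingerprints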
key_fingerprints = set(private_keys.keys())
cert_fingerprints = set(certs.keys())
common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))
key = None
cert = None
other_certs = []
if len(common_fingerprints) >= 1:
fingerprint = common_fingerprints[0]
key = private_keys[fingerprint]
cert = certs[fingerprint]
other_certs = [certs[f] for f in certs if f != fingerprint]
return (key, cert, other_certs)
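    # No key/cert pair matched: fall back to the first key and the first cert found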
if len(private_keys) > 0:
first_key = sorted(list(private_keys.keys()))[0]
key = private_keys[first_key]
if len(certs) > 0:
first_key = sorted(list(certs.keys()))[0]
cert = certs[first_key]
del certs[first_key]
if len(certs) > 0:
other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)
return (key, cert, other_certs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if tbsCertificateP.value[0]==0xA0:\r\n subjectPublicKeyInfoIndex = 6\r\n else:\r\n subjectPublicKeyInfoIndex = 5\r\n\r\n #Get the subject\r\n self.subject = tbsCertificateP.getChildBytes(\\\r\n subjectPublicKeyInfoIndex - 1)\r\n\r\n #Get the subjectPublicKeyInfo\r\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\r\n subjectPublicKeyInfoIndex)\r\n\r\n #Get the algorithm\r\n algorithmP = subjectPublicKeyInfoP.getChild(0)\r\n rsaOID = algorithmP.value\r\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\r\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\r\n\r\n #Get the subjectPublicKey\r\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\r\n\r\n #Adjust for BIT STRING encapsulation\r\n if (subjectPublicKeyP.value[0] !=0):\r\n raise SyntaxError()\r\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\r\n\r\n #Get the modulus and exponent\r\n modulusP = subjectPublicKeyP.getChild(0)\r\n publicExponentP = subjectPublicKeyP.getChild(1)\r\n\r\n #Decode them into numbers\r\n n = bytesToNumber(modulusP.value)\r\n e = bytesToNumber(publicExponentP.value)\r\n\r\n #Create a public key instance\r\n self.publicKey = _createPublicRSAKey(n, e)",
"def asn1_loads(asn1_str):\n\n # ASN.1 grammar\n identifier = pp.Word(pp.alphas + \"_\")\n assign = pp.Literal(\"::=\")\n # typedef = identifier.setName(\"typeref\") + assign + identifier.setName(\"basetype\")\n comment1 = pp.Literal(\"#\") + pp.originalTextFor(pp.SkipTo(pp.LineEnd()))\n # typelist = pp.OneOrMore(typedef)\n meta1 = pp.LineStart() + identifier + pp.Literal(\":\") + pp.SkipTo(pp.LineEnd()).setDebug()\n meta2 = pp.LineStart() + pp.White() + pp.SkipTo(pp.LineEnd()).setDebug()\n metaval = meta1 + pp.ZeroOrMore(meta2)\n # metalist = pp.ZeroOrMore(comment1) + pp.Literal(\"/*\") + pp.OneOrMore(metaval) + pp.Literal(\"*/\")\n metalist = pp.SkipTo(pp.Literal(\"/*\")).setDebug() + pp.Literal(\"/*\") + pp.OneOrMore(\n metaval).setDebug() + pp.Literal(\"*/\")\n\n asn1 = metalist.parseString(asn1_str, parseAll=False)\n print(asn1)\n jaen = {\"meta\": {}, \"types\": []}\n return jaen",
"def _extract_values_from_certificate(cert):\n logger = getLogger(__name__)\n # cert and serial number\n data = {\n u'cert': cert,\n u'issuer': cert.get_issuer().der(),\n u'serial_number': cert.get_serial_number(),\n u'algorithm': rfc2437.id_sha1,\n u'algorithm_parameter': univ.Any(hexValue='0500') # magic number\n }\n # DN Hash\n data[u'name'] = cert.get_subject()\n cert_der = data[u'name'].der()\n sha1_hash = hashlib.sha1()\n sha1_hash.update(cert_der)\n data[u'name_hash'] = sha1_hash.hexdigest()\n\n # public key Hash\n data['key_hash'] = _get_pubickey_sha1_hash(cert).hexdigest()\n\n # CRL and OCSP\n data['crl'] = None\n ocsp_uris0 = []\n for idx in range(cert.get_extension_count()):\n e = cert.get_extension(idx)\n if e.get_short_name() == b'authorityInfoAccess':\n for line in str(e).split(u\"\\n\"):\n m = OCSP_RE.match(line)\n if m:\n logger.debug(u'OCSP URL: %s', m.group(1))\n ocsp_uris0.append(m.group(1))\n elif e.get_short_name() == b'crlDistributionPoints':\n for line in str(e).split(u\"\\n\"):\n m = CRL_RE.match(line)\n if m:\n logger.debug(u\"CRL: %s\", m.group(1))\n data['crl'] = m.group(1)\n\n if len(ocsp_uris0) == 1:\n data['ocsp_uri'] = ocsp_uris0[0]\n elif len(ocsp_uris0) == 0:\n data['ocsp_uri'] = u''\n else:\n raise OperationalError(\n msg=u'More than one OCSP URI entries are specified in '\n u'the certificate',\n errno=ER_FAILED_TO_GET_OCSP_URI,\n )\n data[u'is_root_ca'] = cert.get_subject() == cert.get_issuer()\n return data",
"def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self",
"def from_bytes(cls, bytes):\n construct = _constructs.Certificate.parse(bytes)\n return cls(\n certificate_list=[\n ASN1Cert(\n asn1_cert=asn1cert.asn1_cert\n )\n for asn1cert in construct.certificate_list],\n )",
"def parse_config(self, data):\n match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, but we don't use that for GCMU\n return (cert, key)",
"def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List",
"def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)",
"def parse_der_certificates(der_bytes: bytes) -> List[Certificate]:\n\n result = []\n try:\n leaf = x509.load_der_x509_certificate(der_bytes, default_backend())\n result.append(leaf)\n _, remaining_data = decode(der_bytes)\n while len(remaining_data) > 0:\n cert = x509.load_der_x509_certificate(remaining_data, default_backend())\n result.append(cert)\n _, remaining_data = decode(remaining_data)\n except Exception:\n raise X509CertificateError('Unable to parse DER X.509 certificate')\n\n return result",
"def dumpasn1(self):\n\n ret = None\n fn = \"dumpasn1.%d.tmp\" % os.getpid()\n try:\n f = open(fn, \"wb\")\n f.write(self.get_DER())\n f.close()\n p = subprocess.Popen((\"dumpasn1\", \"-a\", fn), stdout = subprocess.PIPE, stderr = subprocess.STDOUT)\n ret = \"\\n\".join(x for x in p.communicate()[0].splitlines() if x.startswith(\" \"))\n except Exception, e:\n ret = \"[Could not run dumpasn1: %s]\" % e\n finally:\n os.unlink(fn)\n return ret",
"def get_der(self):\n return OpenSSL.crypto.dump_certificate(\n OpenSSL.crypto.FILETYPE_ASN1, self._cert)",
"def info_from_args(args):\n return CertInfo(\n subject=parse_dn(args.subject),\n usage=parse_list(args.usage),\n alt_names=parse_list(args.san),\n ocsp_nocheck=args.ocsp_nocheck,\n ocsp_must_staple=args.ocsp_must_staple,\n ocsp_must_staple_v2=args.ocsp_must_staple_v2,\n ocsp_urls=parse_list(args.ocsp_urls),\n crl_urls=parse_list(args.crl_urls),\n issuer_urls=parse_list(args.issuer_urls),\n permit_subtrees=parse_list(args.permit_subtrees),\n exclude_subtrees=parse_list(args.exclude_subtrees),\n ca=args.CA,\n path_length=args.path_length)",
"def verify(self, ta):\n\n try:\n cms = self.get_POW()\n except:\n if self.print_on_der_error:\n logger.debug(\"Problem parsing DER CMS message, might not really be DER: %r\",\n self.get_DER())\n raise rpki.exceptions.UnparsableCMSDER\n\n if cms.eContentType() != self.econtent_oid:\n raise rpki.exceptions.WrongEContentType(\"Got CMS eContentType %s, expected %s\" % (\n cms.eContentType(), self.econtent_oid))\n\n certs = [X509(POW = x) for x in cms.certs()]\n crls = [CRL(POW = c) for c in cms.crls()]\n\n if self.debug_cms_certs:\n for x in certs:\n logger.debug(\"Received CMS cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n for c in crls:\n logger.debug(\"Received CMS CRL issuer %r\", c.getIssuer())\n\n now = rpki.sundial.now()\n\n trusted_ee = None\n trusted_ca = []\n untrusted_ee = None\n\n for x in X509.normalize_chain(ta):\n if self.debug_cms_certs:\n logger.debug(\"CMS trusted cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n if x.getNotAfter() < now:\n raise rpki.exceptions.TrustedCMSCertHasExpired(\"Trusted CMS certificate has expired\",\n \"%s (%s)\" % (x.getSubject(), x.hSKI()))\n if x.is_CA():\n trusted_ca.append(x)\n else:\n if trusted_ee is None:\n trusted_ee = x\n else:\n raise rpki.exceptions.MultipleCMSEECert(\"Multiple CMS EE certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))\n\n if trusted_ee:\n if self.debug_cms_certs:\n logger.debug(\"Trusted CMS EE cert issuer %s subject %s SKI %s\",\n trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())\n if len(certs) > 1 or (len(certs) == 1 and\n (certs[0].getSubject() != trusted_ee.getSubject() or\n certs[0].getPublicKey() != trusted_ee.getPublicKey())):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n if crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n else:\n untrusted_ee = [x for x in certs if not x.is_CA()]\n if len(untrusted_ee) < 1:\n raise rpki.exceptions.MissingCMSEEcert\n if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n untrusted_ee = untrusted_ee[0]\n if len(crls) < 1:\n if self.require_crls:\n raise rpki.exceptions.MissingCMSCRL\n else:\n logger.warning(\"MISSING CMS CRL! Ignoring per self.require_crls setting\")\n if len(crls) > 1 and not self.allow_extra_crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n for x in certs:\n if x.getNotAfter() < now:\n raise rpki.exceptions.CMSCertHasExpired(\"CMS certificate has expired\", \"%s (%s)\" % (\n x.getSubject(), x.hSKI()))\n\n for c in crls:\n if c.getNextUpdate() < now:\n logger.warning(\"Stale BPKI CMS CRL (%s %s %s)\", c.getNextUpdate(), c.getIssuer(), c.hAKI())\n\n # XXX Verify certificate chain via X.509 machinery, not CMS\n # machinery. 
Awful mess due to history, needs cleanup, but\n # get it working again first.\n\n cert = (trusted_ee or untrusted_ee).get_POW()\n\n cert.verify(trusted = (x.get_POW() for x in trusted_ca),\n crl = crls[0].get_POW() if untrusted_ee and crls else None)\n\n try:\n # XXX This isn't right yet, but let's test before gettting more complicated\n #\n # Aside from all the type and exception abominations, the\n # main problem here is that we're no longer verifying the\n # certificate chain, just the CMS signature. Certificate\n # verificaiton is a separate step under the new scheme,\n # and probably comes before this, but let's write down\n # what the problem is before it gets lost...\n\n content = cms.verify(certs = (x.get_POW() for x in X509.normalize_chain(ta)),\n flags = rpki.POW.CMS_NO_SIGNER_CERT_VERIFY)\n except:\n if self.dump_on_verify_failure:\n if self.dump_using_dumpasn1:\n dbg = self.dumpasn1()\n else:\n dbg = cms.pprint()\n logger.warning(\"CMS verification failed, dumping ASN.1 (%d octets):\", len(self.get_DER()))\n for line in dbg.splitlines():\n logger.warning(line)\n\n # XXX Old code replaced rpki.POW exception with this. For\n # debugging I'd rather see what POW has to say; decide\n # later whether to keep this change.\n #\n #raise rpki.exceptions.CMSVerificationFailed(\"CMS verification failed\")\n raise\n\n return content",
"def test_pkcs12_ordering():\n\n def make_cert(name):\n key = ec.generate_private_key(ec.SECP256R1())\n subject = x509.Name(\n [\n x509.NameAttribute(x509.NameOID.COMMON_NAME, name),\n ]\n )\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(subject)\n .public_key(key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(now)\n .not_valid_after(now)\n .sign(key, hashes.SHA256())\n )\n return (key, cert)\n\n # Make some certificates with distinct names.\n a_name = \"A\" * 20\n b_name = \"B\" * 20\n c_name = \"C\" * 20\n a_key, a_cert = make_cert(a_name)\n _, b_cert = make_cert(b_name)\n _, c_cert = make_cert(c_name)\n\n # Bundle them in a PKCS#12 file in order A, B, C.\n p12 = serialize_key_and_certificates(\n b\"p12\", a_key, a_cert, [b_cert, c_cert], serialization.NoEncryption()\n )\n\n # Parse them out. The API should report them in the same order.\n (key, cert, certs) = load_key_and_certificates(p12, None)\n assert cert == a_cert\n assert certs == [b_cert, c_cert]\n\n # The ordering in the PKCS#12 file itself should also match.\n a_idx = p12.index(a_name.encode(\"utf-8\"))\n b_idx = p12.index(b_name.encode(\"utf-8\"))\n c_idx = p12.index(c_name.encode(\"utf-8\"))\n\n assert a_idx < b_idx < c_idx",
"def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))",
"def get_certinfo(doc):\n\n #set a two second default timeout to recieve a cert\n socket.setdefaulttimeout(2)\n doc['ssl'] = {} \n\n try:\n cert = ssl.get_server_certificate((doc['hostname'], 443))\n #sometimes certs come back as unicode so cast to str() aka ascii\n cert = M2Crypto.X509.load_cert_string(str(cert))\n\n except:\n syslog.syslog('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n print('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n #lets remove the ssl key and return the doc untouched\n doc.pop('ssl')\n return doc\n\n\n #get creation date\n doc['ssl']['created'] = cert.get_not_before().get_datetime().isoformat()\n #get not valid after, aka expiration data\n doc['ssl']['expire'] = cert.get_not_after().get_datetime().isoformat()\n #get issuer information\n doc['ssl']['issuer'] = cert.get_issuer().as_text()\n #get subject information\n doc['ssl']['subject'] = cert.get_subject().as_text()\n #get keysize, size() returns in bytes, so we multiply * 8 to get the number of bits\n doc['ssl']['keysize'] = cert.get_pubkey().size() * 8\n #get cert fingerprint for comparison\n doc['ssl']['fingerprint'] = cert.get_fingerprint()\n\n return doc",
"def _check_certificate(public_cert_content, priv_key_content,\n domain=None, at_time=None):\n result = {}\n # Read the private key and public certificate\n try:\n priv_key = OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, priv_key_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate_key': {\n 'state': 'invalid', 'detail': str(err)}})\n priv_key = None\n\n try:\n public_cert = OpenSSL.crypto.load_certificate(\n OpenSSL.crypto.FILETYPE_PEM, public_cert_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate': {\n 'state': 'invalid', 'detail': str(err)}})\n public_cert = None\n\n if priv_key and public_cert:\n context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n context.use_privatekey(priv_key)\n context.use_certificate(public_cert)\n try:\n context.check_privatekey()\n except OpenSSL.SSL.Error:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate does not match private key.\"}})\n\n if result:\n raise RuntimeError(result)\n\n not_after = public_cert.get_notAfter()\n if not isinstance(not_after, six.string_types):\n not_after = not_after.decode('utf-8')\n not_after = datetime.datetime.strptime(not_after, \"%Y%m%d%H%M%SZ\")\n common_name = public_cert.get_subject().commonName\n alt_names = []\n for ext_idx in range(0, public_cert.get_extension_count()):\n extension = public_cert.get_extension(ext_idx)\n if extension.get_short_name().decode('utf-8') == 'subjectAltName':\n # data of the X509 extension, encoded as ASN.1\n decoded_alt_names, _ = asn1_decoder(\n extension.get_data(), asn1Spec=SubjectAltName())\n for alt in nat_encoder(decoded_alt_names):\n alt_name = alt['dNSName'].decode('utf-8')\n if alt_name != common_name:\n alt_names += [alt_name]\n if domain:\n found = False\n for alt_name in [common_name] + alt_names:\n regex = alt_name.replace('.', r'\\.').replace('*', r'.*') + '$'\n if re.match(regex, domain) or alt_name == domain:\n found = True\n break\n if not found:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"domain name (%s) does not match common or alt names\"\\\n \" present in certificate (%s, %s).\" % (\n domain, common_name, ','.join(alt_names))}})\n if at_time:\n if not_after <= at_time:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate is only valid until %s.\" % not_after}})\n\n if result:\n raise RuntimeError(result)\n\n result.update({'ssl_certificate': {\n 'common_name': common_name,\n 'alt_names': alt_names,\n 'state': result.get('ssl_certificate', {}).get('state', 'valid'),\n 'issuer': public_cert.get_issuer().organizationName,\n 'ends_at': not_after.isoformat()}})\n return result",
"def unpack_keys_from_xfer(key_pack_hex: hex,\n path=paths.nacl_keys,\n *args,\n **kwargs):\n global public_box\n\n try:\n key_dict = public_box.decrypt(key_pack_hex)\n key_dict = json.loads(key_dict)\n\n aes_key = key_dict[\"aes\"]\n AES256Cipher().write_key(aes_key.encode())\n\n fernet_key = key_dict[\"fernet\"]\n FernetCipher().write_key(fernet_key.encode())\n\n chacha_key = key_dict[\"chacha\"]\n XChaCha20Poly1305.write_key(Base64Encoder.decode(chacha_key))\n\n except:\n print(sysMsgList.keysUnpackFail)",
"def Certificate(self) -> _n_8_t_0:",
"def Certificate(self) -> _n_8_t_0:",
"def unwrap(self):\n\n if self.algorithm == 'ec':\n return self.asn1['public_key']\n return self.asn1['public_key'].parsed",
"def parsePEM(s, passwordCallback=None):\r\n\r\n if pemSniff(s, \"PRIVATE KEY\"):\r\n bytes = dePem(s, \"PRIVATE KEY\")\r\n return Python_RSAKey._parsePKCS8(bytes)\r\n elif pemSniff(s, \"RSA PRIVATE KEY\"):\r\n bytes = dePem(s, \"RSA PRIVATE KEY\")\r\n return Python_RSAKey._parseSSLeay(bytes)\r\n else:\r\n raise SyntaxError(\"Not a PEM private key file\")",
"def _extract_certificate_chain(connection):\n logger = getLogger(__name__)\n cert_data = {}\n logger.debug(\n \"# of certificates: %s\",\n len(connection.get_peer_cert_chain()))\n\n for cert in connection.get_peer_cert_chain():\n logger.debug(\n u'subject: %s, issuer: %s', cert.get_subject(),\n cert.get_issuer())\n data = _extract_values_from_certificate(cert)\n logger.debug('is_root_ca: %s', data[u'is_root_ca'])\n cert_data[cert.get_subject().der()] = data\n return _create_pair_issuer_subject(cert_data)",
"def test_public_key_dsa(self):\n cert = \"\"\"-----BEGIN CERTIFICATE-----\nMIICDjCCAcqgAwIBAgIJAMcdoiKyV98cMAsGCWCGSAFlAwQDAjAiMRAwDgYDVQQD\nDAdEU0EgNTEyMQ4wDAYDVQQKDAVXZWJDQTAeFw0xODA1MjcxMDI1MjBaFw0xODA2\nMjYxMDI1MjBaMCIxEDAOBgNVBAMMB0RTQSA1MTIxDjAMBgNVBAoMBVdlYkNBMIHw\nMIGoBgcqhkjOOAQBMIGcAkEAnogScrza9M5nFogjwu7MUSgOeWRfHSFWKLiFxfkN\nOAb1Z5oXTUKRcKdSxfI1zu47rvyqV6+4SSkQEsVJ2/7DQQIVANuQv4L3sp8AiUn+\nmwCyXhedQl2ZAkBfCDLU4nx7OeMx+vD9MN7FW57pHm/43B1Tu/cUOWcp5VHPJRuV\nWJqINIteY/0ilFEUCMibgol8Upj6CGnuDpvTA0MAAkAbnRx76A8r+o/3I5hlrlAm\nCi68uiiqW6W2R40U2g/KlIiafMEQ3+OrMwwkPX0aaJwa8m7lsUlmhhYOXu5p4fL1\no1AwTjAdBgNVHQ4EFgQUHub1qPkaKCtkQbmu3RnLaa8QAP4wHwYDVR0jBBgwFoAU\nHub1qPkaKCtkQbmu3RnLaa8QAP4wDAYDVR0TBAUwAwEB/zALBglghkgBZQMEAwID\nMQAwLgIVAMOEZCvJoNjIMzbH0yWrEUS6IxywAhUAzDhkGKvAH1V3o2ZsJsIotFUk\nIiQ=\n-----END CERTIFICATE-----\"\"\"\n x509 = crypto.load_certificate(PEM, cert)\n self.assertEqual(utils.public_key_type(x509), c.KEY_DSA)",
"def parse_key(key: RSA.RsaKey) -> str:\n\n return binascii.hexlify(key.exportKey(\n format='DER')).decode('ascii')",
"def parseAsPublicKey(s):\r\n return parsePEMKey(s, public=True)",
"def parse(obj):\n data = json.loads(obj)\n cryptopars = init_crypto_args(**data)\n return cryptopars\n # return cipherdata(cryptopars, **data)",
"def parse(self, xml_text):\n xml_doc = parse_doc(xml_text)\n data = findtext(xml_doc, \"Data\")\n if data is None:\n return\n\n cryptutil = CryptUtil(conf.get_openssl_cmd())\n p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME)\n p7m = (\"MIME-Version:1.0\\n\"\n \"Content-Disposition: attachment; filename=\\\"{0}\\\"\\n\"\n \"Content-Type: application/x-pkcs7-mime; name=\\\"{1}\\\"\\n\"\n \"Content-Transfer-Encoding: base64\\n\"\n \"\\n\"\n \"{2}\").format(p7m_file, p7m_file, data)\n\n self.client.save_cache(p7m_file, p7m)\n\n trans_prv_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_PRV_FILE_NAME)\n trans_cert_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_CERT_FILE_NAME)\n pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME)\n # decrypt certificates\n cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file,\n pem_file)\n\n # The parsing process use public key to match prv and crt.\n buf = []\n begin_crt = False\n begin_prv = False\n prvs = {}\n thumbprints = {}\n index = 0\n v1_cert_list = []\n with open(pem_file) as pem:\n for line in pem.readlines():\n buf.append(line)\n if re.match(r'[-]+BEGIN.*KEY[-]+', line):\n begin_prv = True\n elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line):\n begin_crt = True\n elif re.match(r'[-]+END.*KEY[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'prv', buf)\n pub = cryptutil.get_pubkey_from_prv(tmp_file)\n prvs[pub] = tmp_file\n buf = []\n index += 1\n begin_prv = False\n elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'crt', buf)\n pub = cryptutil.get_pubkey_from_crt(tmp_file)\n thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)\n thumbprints[pub] = thumbprint\n # Rename crt with thumbprint as the file name\n crt = \"{0}.crt\".format(thumbprint)\n v1_cert_list.append({\n \"name\": None,\n \"thumbprint\": thumbprint\n })\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))\n buf = []\n index += 1\n begin_crt = False\n\n # Rename prv key with thumbprint as the file name\n for pubkey in prvs:\n thumbprint = thumbprints[pubkey]\n if thumbprint:\n tmp_file = prvs[pubkey]\n prv = \"{0}.prv\".format(thumbprint)\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv))\n\n for v1_cert in v1_cert_list:\n cert = Cert()\n set_properties(\"certs\", cert, v1_cert)\n self.cert_list.certificates.append(cert)",
"def test__format_asn_dict(self, parser):\n for key, value in RPKI_Validator_Wrapper.get_validity_dict().items():\n d = {'asn': 'AS198051', 'prefix': '1.2.0.0/16', 'validity': key}\n assert parser._format_asn_dict(d) == [198051, '1.2.0.0/16', value]",
"def _ParseCertificateArguments(client, args):\n self_managed = None\n managed = None\n certificate_type = None\n if args.certificate:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.SELF_MANAGED\n certificate = files.ReadFileContents(args.certificate)\n private_key = files.ReadFileContents(args.private_key)\n self_managed = client.messages.SslCertificateSelfManagedSslCertificate(\n certificate=certificate, privateKey=private_key)\n if args.domains:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.MANAGED\n managed = client.messages.SslCertificateManagedSslCertificate(\n domains=args.domains)\n return certificate_type, self_managed, managed"
] | [
"0.60229003",
"0.5695678",
"0.5526086",
"0.54503",
"0.5424802",
"0.51653993",
"0.51222205",
"0.5089913",
"0.5076887",
"0.50647706",
"0.5058496",
"0.4928613",
"0.4890625",
"0.48888293",
"0.48255894",
"0.47993195",
"0.47745132",
"0.47668105",
"0.47628716",
"0.47628716",
"0.47595042",
"0.47550887",
"0.47479302",
"0.47212827",
"0.46697128",
"0.46628034",
"0.46335277",
"0.45957088",
"0.45930824",
"0.4584873"
] | 0.67407256 | 0 |
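
For orientation, a minimal usage sketch of the kind of consumer the parser above serves: it loads a PKCS#12 bundle through the `cryptography` package's public API and unpacks the same (key, cert, other_certs) ordering that `_parse_pkcs12` returns. This is a hedged sketch, not part of the dataset: the file name and password are placeholders.

# Sketch only: a rough functional equivalent of _parse_pkcs12 using the
# `cryptography` package; "bundle.p12" and the password are placeholders.
from cryptography.hazmat.primitives.serialization.pkcs12 import (
    load_key_and_certificates,
)

with open("bundle.p12", "rb") as handle:
    p12_data = handle.read()

# Returns (private_key, end_entity_cert, [additional_certs]), the same shape
# as the (key, cert, other_certs) tuple produced by _parse_pkcs12 above.
key, cert, other_certs = load_key_and_certificates(p12_data, b"password")
if cert is not None:
    print(cert.subject.rfc4514_string())
print(len(other_certs), "additional certificate(s) in the bundle")
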
Parses a SafeContents PKCS12 ASN.1 structure and extracts certs and keys | def _parse_safe_contents(safe_contents, certs, private_keys, password, load_private_key):
if isinstance(safe_contents, byte_cls):
safe_contents = SafeContents.load(safe_contents)
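    # Each SafeBag may hold a certificate, a (possibly encrypted) private key,
    # or a nested SafeContents structure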
for safe_bag in safe_contents:
bag_value = safe_bag['bag_value']
if isinstance(bag_value, CertBag):
if bag_value['cert_id'].native == 'x509':
cert = bag_value['cert_value'].parsed
public_key_info = cert['tbs_certificate']['subject_public_key_info']
certs[_fingerprint(public_key_info, None)] = bag_value['cert_value'].parsed
elif isinstance(bag_value, PrivateKeyInfo):
private_keys[_fingerprint(bag_value, load_private_key)] = bag_value
elif isinstance(bag_value, EncryptedPrivateKeyInfo):
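            # Decrypt the PKCS#8 EncryptedPrivateKeyInfo with the same password, then parse it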
encryption_algorithm_info = bag_value['encryption_algorithm']
encrypted_key_bytes = bag_value['encrypted_data'].native
decrypted_key_bytes = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_key_bytes, password)
private_key = PrivateKeyInfo.load(decrypted_key_bytes)
private_keys[_fingerprint(private_key, load_private_key)] = private_key
elif isinstance(bag_value, SafeContents):
_parse_safe_contents(bag_value, certs, private_keys, password, load_private_key)
else:
# We don't care about CRL bags or secret bags
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_pkcs12(data, password, load_private_key):\n\n if not isinstance(data, byte_cls):\n raise TypeError(pretty_message(\n '''\n data must be a byte string, not %s\n ''',\n type_name(data)\n ))\n\n if password is not None:\n if not isinstance(password, byte_cls):\n raise TypeError(pretty_message(\n '''\n password must be a byte string, not %s\n ''',\n type_name(password)\n ))\n else:\n password = b''\n\n certs = {}\n private_keys = {}\n\n pfx = Pfx.load(data)\n\n auth_safe = pfx['auth_safe']\n if auth_safe['content_type'].native != 'data':\n raise ValueError(pretty_message(\n '''\n Only password-protected PKCS12 files are currently supported\n '''\n ))\n authenticated_safe = pfx.authenticated_safe\n\n mac_data = pfx['mac_data']\n if mac_data:\n mac_algo = mac_data['mac']['digest_algorithm']['algorithm'].native\n key_length = {\n 'sha1': 20,\n 'sha224': 28,\n 'sha256': 32,\n 'sha384': 48,\n 'sha512': 64,\n 'sha512_224': 28,\n 'sha512_256': 32,\n }[mac_algo]\n mac_key = pkcs12_kdf(\n mac_algo,\n password,\n mac_data['mac_salt'].native,\n mac_data['iterations'].native,\n key_length,\n 3 # ID 3 is for generating an HMAC key\n )\n hash_mod = getattr(hashlib, mac_algo)\n computed_hmac = hmac.new(mac_key, auth_safe['content'].contents, hash_mod).digest()\n stored_hmac = mac_data['mac']['digest'].native\n if not constant_compare(computed_hmac, stored_hmac):\n raise ValueError('Password provided is invalid')\n\n for content_info in authenticated_safe:\n content = content_info['content']\n\n if isinstance(content, OctetString):\n _parse_safe_contents(content.native, certs, private_keys, password, load_private_key)\n\n elif isinstance(content, EncryptedData):\n encrypted_content_info = content['encrypted_content_info']\n\n encryption_algorithm_info = encrypted_content_info['content_encryption_algorithm']\n encrypted_content = encrypted_content_info['encrypted_content'].native\n decrypted_content = _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password)\n\n _parse_safe_contents(decrypted_content, certs, private_keys, password, load_private_key)\n\n else:\n raise ValueError(pretty_message(\n '''\n Public-key-based PKCS12 files are not currently supported\n '''\n ))\n\n key_fingerprints = set(private_keys.keys())\n cert_fingerprints = set(certs.keys())\n\n common_fingerprints = sorted(list(key_fingerprints & cert_fingerprints))\n\n key = None\n cert = None\n other_certs = []\n\n if len(common_fingerprints) >= 1:\n fingerprint = common_fingerprints[0]\n key = private_keys[fingerprint]\n cert = certs[fingerprint]\n other_certs = [certs[f] for f in certs if f != fingerprint]\n return (key, cert, other_certs)\n\n if len(private_keys) > 0:\n first_key = sorted(list(private_keys.keys()))[0]\n key = private_keys[first_key]\n\n if len(certs) > 0:\n first_key = sorted(list(certs.keys()))[0]\n cert = certs[first_key]\n del certs[first_key]\n\n if len(certs) > 0:\n other_certs = sorted(list(certs.values()), key=lambda c: c.subject.human_friendly)\n\n return (key, cert, other_certs)",
"def parse_config(self, data):\n match = re.search(\"-----BEGIN RSA PRIVATE KEY-----.*\" + \\\n \"-----END RSA PRIVATE KEY-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Private key not found\")\n key = match.group()\n\n match = re.search(\"-----BEGIN CERTIFICATE-----.*\" + \\\n \"-----END CERTIFICATE-----\\n\", data, \n re.MULTILINE | re.DOTALL)\n if not match:\n raise Exception(\"Certificate not found\")\n cert = match.group()\n # config also contains allowed, dns, but we don't use that for GCMU\n return (cert, key)",
"def verify(self, ta):\n\n try:\n cms = self.get_POW()\n except:\n if self.print_on_der_error:\n logger.debug(\"Problem parsing DER CMS message, might not really be DER: %r\",\n self.get_DER())\n raise rpki.exceptions.UnparsableCMSDER\n\n if cms.eContentType() != self.econtent_oid:\n raise rpki.exceptions.WrongEContentType(\"Got CMS eContentType %s, expected %s\" % (\n cms.eContentType(), self.econtent_oid))\n\n certs = [X509(POW = x) for x in cms.certs()]\n crls = [CRL(POW = c) for c in cms.crls()]\n\n if self.debug_cms_certs:\n for x in certs:\n logger.debug(\"Received CMS cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n for c in crls:\n logger.debug(\"Received CMS CRL issuer %r\", c.getIssuer())\n\n now = rpki.sundial.now()\n\n trusted_ee = None\n trusted_ca = []\n untrusted_ee = None\n\n for x in X509.normalize_chain(ta):\n if self.debug_cms_certs:\n logger.debug(\"CMS trusted cert issuer %s subject %s SKI %s\",\n x.getIssuer(), x.getSubject(), x.hSKI())\n if x.getNotAfter() < now:\n raise rpki.exceptions.TrustedCMSCertHasExpired(\"Trusted CMS certificate has expired\",\n \"%s (%s)\" % (x.getSubject(), x.hSKI()))\n if x.is_CA():\n trusted_ca.append(x)\n else:\n if trusted_ee is None:\n trusted_ee = x\n else:\n raise rpki.exceptions.MultipleCMSEECert(\"Multiple CMS EE certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in ta if not x.is_CA()))\n\n if trusted_ee:\n if self.debug_cms_certs:\n logger.debug(\"Trusted CMS EE cert issuer %s subject %s SKI %s\",\n trusted_ee.getIssuer(), trusted_ee.getSubject(), trusted_ee.hSKI())\n if len(certs) > 1 or (len(certs) == 1 and\n (certs[0].getSubject() != trusted_ee.getSubject() or\n certs[0].getPublicKey() != trusted_ee.getPublicKey())):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n if crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n else:\n untrusted_ee = [x for x in certs if not x.is_CA()]\n if len(untrusted_ee) < 1:\n raise rpki.exceptions.MissingCMSEEcert\n if len(untrusted_ee) > 1 or (not self.allow_extra_certs and len(certs) > len(untrusted_ee)):\n raise rpki.exceptions.UnexpectedCMSCerts(\"Unexpected CMS certificates\", *(\"%s (%s)\" % (\n x.getSubject(), x.hSKI()) for x in certs))\n untrusted_ee = untrusted_ee[0]\n if len(crls) < 1:\n if self.require_crls:\n raise rpki.exceptions.MissingCMSCRL\n else:\n logger.warning(\"MISSING CMS CRL! Ignoring per self.require_crls setting\")\n if len(crls) > 1 and not self.allow_extra_crls:\n raise rpki.exceptions.UnexpectedCMSCRLs(\"Unexpected CRLs\", *(\"%s (%s)\" % (\n c.getIssuer(), c.hAKI()) for c in crls))\n\n for x in certs:\n if x.getNotAfter() < now:\n raise rpki.exceptions.CMSCertHasExpired(\"CMS certificate has expired\", \"%s (%s)\" % (\n x.getSubject(), x.hSKI()))\n\n for c in crls:\n if c.getNextUpdate() < now:\n logger.warning(\"Stale BPKI CMS CRL (%s %s %s)\", c.getNextUpdate(), c.getIssuer(), c.hAKI())\n\n # XXX Verify certificate chain via X.509 machinery, not CMS\n # machinery. 
Awful mess due to history, needs cleanup, but\n # get it working again first.\n\n cert = (trusted_ee or untrusted_ee).get_POW()\n\n cert.verify(trusted = (x.get_POW() for x in trusted_ca),\n crl = crls[0].get_POW() if untrusted_ee and crls else None)\n\n try:\n # XXX This isn't right yet, but let's test before gettting more complicated\n #\n # Aside from all the type and exception abominations, the\n # main problem here is that we're no longer verifying the\n # certificate chain, just the CMS signature. Certificate\n # verificaiton is a separate step under the new scheme,\n # and probably comes before this, but let's write down\n # what the problem is before it gets lost...\n\n content = cms.verify(certs = (x.get_POW() for x in X509.normalize_chain(ta)),\n flags = rpki.POW.CMS_NO_SIGNER_CERT_VERIFY)\n except:\n if self.dump_on_verify_failure:\n if self.dump_using_dumpasn1:\n dbg = self.dumpasn1()\n else:\n dbg = cms.pprint()\n logger.warning(\"CMS verification failed, dumping ASN.1 (%d octets):\", len(self.get_DER()))\n for line in dbg.splitlines():\n logger.warning(line)\n\n # XXX Old code replaced rpki.POW exception with this. For\n # debugging I'd rather see what POW has to say; decide\n # later whether to keep this change.\n #\n #raise rpki.exceptions.CMSVerificationFailed(\"CMS verification failed\")\n raise\n\n return content",
"def parseBinary(self, bytes):\r\n\r\n self.bytes = bytearray(bytes)\r\n p = ASN1Parser(bytes)\r\n\r\n #Get the tbsCertificate\r\n tbsCertificateP = p.getChild(0)\r\n\r\n #Is the optional version field present?\r\n #This determines which index the key is at.\r\n if tbsCertificateP.value[0]==0xA0:\r\n subjectPublicKeyInfoIndex = 6\r\n else:\r\n subjectPublicKeyInfoIndex = 5\r\n\r\n #Get the subject\r\n self.subject = tbsCertificateP.getChildBytes(\\\r\n subjectPublicKeyInfoIndex - 1)\r\n\r\n #Get the subjectPublicKeyInfo\r\n subjectPublicKeyInfoP = tbsCertificateP.getChild(\\\r\n subjectPublicKeyInfoIndex)\r\n\r\n #Get the algorithm\r\n algorithmP = subjectPublicKeyInfoP.getChild(0)\r\n rsaOID = algorithmP.value\r\n if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:\r\n raise SyntaxError(\"Unrecognized AlgorithmIdentifier\")\r\n\r\n #Get the subjectPublicKey\r\n subjectPublicKeyP = subjectPublicKeyInfoP.getChild(1)\r\n\r\n #Adjust for BIT STRING encapsulation\r\n if (subjectPublicKeyP.value[0] !=0):\r\n raise SyntaxError()\r\n subjectPublicKeyP = ASN1Parser(subjectPublicKeyP.value[1:])\r\n\r\n #Get the modulus and exponent\r\n modulusP = subjectPublicKeyP.getChild(0)\r\n publicExponentP = subjectPublicKeyP.getChild(1)\r\n\r\n #Decode them into numbers\r\n n = bytesToNumber(modulusP.value)\r\n e = bytesToNumber(publicExponentP.value)\r\n\r\n #Create a public key instance\r\n self.publicKey = _createPublicRSAKey(n, e)",
"def parsePemList(self, s):\r\n x509List = []\r\n bList = dePemList(s, \"CERTIFICATE\")\r\n for b in bList:\r\n x509 = X509()\r\n x509.parseBinary(b)\r\n x509List.append(x509)\r\n self.x509List = x509List",
"def ParsePkgInfo(contents, filename, valid_keys=None, required_keys=None):\n rtn = {}\n if valid_keys is None:\n valid_keys = VALID_KEYS\n if required_keys is None:\n required_keys = REQUIRED_KEYS\n\n def ParsePkgInfoLine(line, line_no):\n if '=' not in line:\n raise PkgFormatError('Invalid info line %s:%d' % (filename, line_no))\n key, value = line.split('=', 1)\n key = key.strip()\n if key not in valid_keys:\n raise PkgFormatError(\"Invalid key '%s' in info file %s:%d\" %\n (key, filename, line_no))\n value = value.strip()\n if value[0] == '(':\n if value[-1] != ')':\n raise PkgFormatError('Error parsing %s:%d: %s (%s)' %\n (filename, line_no, key, value))\n value = value[1:-1].split()\n else:\n value = shlex.split(value)[0]\n return (key, value)\n\n def ExpandVars(value, substitutions):\n if type(value) == str:\n return string.Template(value).substitute(substitutions)\n else:\n return [string.Template(v).substitute(substitutions) for v in value]\n\n for i, line in enumerate(contents.splitlines()):\n if not line or line[0] == '#':\n continue\n key, raw_value = ParsePkgInfoLine(line, i + 1)\n if key in rtn:\n raise PkgFormatError('Error parsing %s:%d: duplicate key (%s)' %\n (filename, i + 1, key))\n rtn[key] = ExpandVars(raw_value, rtn)\n\n for required_key in required_keys:\n if required_key not in rtn:\n raise PkgFormatError(\"Required key '%s' missing from info file: '%s'\" %\n (required_key, filename))\n\n return rtn",
"def _extract_values_from_certificate(cert):\n logger = getLogger(__name__)\n # cert and serial number\n data = {\n u'cert': cert,\n u'issuer': cert.get_issuer().der(),\n u'serial_number': cert.get_serial_number(),\n u'algorithm': rfc2437.id_sha1,\n u'algorithm_parameter': univ.Any(hexValue='0500') # magic number\n }\n # DN Hash\n data[u'name'] = cert.get_subject()\n cert_der = data[u'name'].der()\n sha1_hash = hashlib.sha1()\n sha1_hash.update(cert_der)\n data[u'name_hash'] = sha1_hash.hexdigest()\n\n # public key Hash\n data['key_hash'] = _get_pubickey_sha1_hash(cert).hexdigest()\n\n # CRL and OCSP\n data['crl'] = None\n ocsp_uris0 = []\n for idx in range(cert.get_extension_count()):\n e = cert.get_extension(idx)\n if e.get_short_name() == b'authorityInfoAccess':\n for line in str(e).split(u\"\\n\"):\n m = OCSP_RE.match(line)\n if m:\n logger.debug(u'OCSP URL: %s', m.group(1))\n ocsp_uris0.append(m.group(1))\n elif e.get_short_name() == b'crlDistributionPoints':\n for line in str(e).split(u\"\\n\"):\n m = CRL_RE.match(line)\n if m:\n logger.debug(u\"CRL: %s\", m.group(1))\n data['crl'] = m.group(1)\n\n if len(ocsp_uris0) == 1:\n data['ocsp_uri'] = ocsp_uris0[0]\n elif len(ocsp_uris0) == 0:\n data['ocsp_uri'] = u''\n else:\n raise OperationalError(\n msg=u'More than one OCSP URI entries are specified in '\n u'the certificate',\n errno=ER_FAILED_TO_GET_OCSP_URI,\n )\n data[u'is_root_ca'] = cert.get_subject() == cert.get_issuer()\n return data",
"def parse(self, xml_text):\n xml_doc = parse_doc(xml_text)\n data = findtext(xml_doc, \"Data\")\n if data is None:\n return\n\n cryptutil = CryptUtil(conf.get_openssl_cmd())\n p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME)\n p7m = (\"MIME-Version:1.0\\n\"\n \"Content-Disposition: attachment; filename=\\\"{0}\\\"\\n\"\n \"Content-Type: application/x-pkcs7-mime; name=\\\"{1}\\\"\\n\"\n \"Content-Transfer-Encoding: base64\\n\"\n \"\\n\"\n \"{2}\").format(p7m_file, p7m_file, data)\n\n self.client.save_cache(p7m_file, p7m)\n\n trans_prv_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_PRV_FILE_NAME)\n trans_cert_file = os.path.join(conf.get_lib_dir(),\n TRANSPORT_CERT_FILE_NAME)\n pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME)\n # decrypt certificates\n cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file,\n pem_file)\n\n # The parsing process use public key to match prv and crt.\n buf = []\n begin_crt = False\n begin_prv = False\n prvs = {}\n thumbprints = {}\n index = 0\n v1_cert_list = []\n with open(pem_file) as pem:\n for line in pem.readlines():\n buf.append(line)\n if re.match(r'[-]+BEGIN.*KEY[-]+', line):\n begin_prv = True\n elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line):\n begin_crt = True\n elif re.match(r'[-]+END.*KEY[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'prv', buf)\n pub = cryptutil.get_pubkey_from_prv(tmp_file)\n prvs[pub] = tmp_file\n buf = []\n index += 1\n begin_prv = False\n elif re.match(r'[-]+END.*CERTIFICATE[-]+', line):\n tmp_file = self.write_to_tmp_file(index, 'crt', buf)\n pub = cryptutil.get_pubkey_from_crt(tmp_file)\n thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file)\n thumbprints[pub] = thumbprint\n # Rename crt with thumbprint as the file name\n crt = \"{0}.crt\".format(thumbprint)\n v1_cert_list.append({\n \"name\": None,\n \"thumbprint\": thumbprint\n })\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt))\n buf = []\n index += 1\n begin_crt = False\n\n # Rename prv key with thumbprint as the file name\n for pubkey in prvs:\n thumbprint = thumbprints[pubkey]\n if thumbprint:\n tmp_file = prvs[pubkey]\n prv = \"{0}.prv\".format(thumbprint)\n os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv))\n\n for v1_cert in v1_cert_list:\n cert = Cert()\n set_properties(\"certs\", cert, v1_cert)\n self.cert_list.certificates.append(cert)",
"def _parse(self, content):\n result = TincConfParser.conf_file.parseString(to_unicode(content))\n for entry in result.get(\"entries\", []):\n self[entry[0]] = entry[1]\n keys = result.get(\"keys\", [])\n if keys:\n if len(keys) > 1:\n raise ParserError(\"Hostfile specifies more than one public key!\")\n self.rsa_public_key = '\\n'.join(keys[0])\n old_keys = result.get(\"old_keys\", [])\n for old_key in old_keys:\n self.old_public_keys.append('\\n'.join(old_key))",
"def extract_certs_from_pem(pem_contents):\n start = 0\n certs = []\n while True:\n index = pem_contents.find(constants.BEGIN_CERTIFICATE_MARKER, start)\n if index == -1:\n break\n try:\n cert = x509.load_pem_x509_certificate(pem_contents[index::],\n default_backend())\n except Exception:\n LOG.exception(_(\"Load pem x509 certificate failed at file \"\n \"location: %s\") % index)\n raise exception.SysinvException(_(\n \"Failed to load pem x509 certificate\"))\n\n certs.append(cert)\n start = index + len(constants.BEGIN_CERTIFICATE_MARKER)\n return certs",
"def parse(self, s):\r\n\r\n bytes = dePem(s, \"CERTIFICATE\")\r\n self.parseBinary(bytes)\r\n return self",
"def extract(self):\n\n try:\n cms = self.get_POW()\n except:\n raise rpki.exceptions.UnparsableCMSDER\n\n if cms.eContentType() != self.econtent_oid:\n raise rpki.exceptions.WrongEContentType(\"Got CMS eContentType %s, expected %s\" % (\n cms.eContentType(), self.econtent_oid))\n\n return cms.verify(flags = (rpki.POW.CMS_NOCRL | rpki.POW.CMS_NO_SIGNER_CERT_VERIFY |\n rpki.POW.CMS_NO_ATTR_VERIFY | rpki.POW.CMS_NO_CONTENT_VERIFY))",
"def deserialize(self, data):\n assert self._cert_store is not None\n try:\n data = self._deserialize(data)\n signature = b64decode(data[\"signature\"])\n signer = data[\"signer\"]\n data = data[\"data\"]\n self._cert_store[signer].verify(data, signature, self._digest)\n return self._deserialize(data)\n except Exception, exc:\n raise SecurityError(\"Unable to deserialize: %r\" % (exc, ))",
"def info(self, fp):\n keys = (\n (\"cas.meta.compression\", CAS._convert_meta),\n (\"cas.meta.lib\", CAS._convert_meta),\n (\"cas.meta.fp_algo\", CAS._convert_meta),\n (\"cas.meta.orig_size\", CAS._convert_meta),\n (\"cas.refcount\", CAS._convert_refcount),\n )\n\n return {key: conv(self.ioctx.get_xattr(fp, key))\n for key, conv in keys}",
"def test_pkcs12_ordering():\n\n def make_cert(name):\n key = ec.generate_private_key(ec.SECP256R1())\n subject = x509.Name(\n [\n x509.NameAttribute(x509.NameOID.COMMON_NAME, name),\n ]\n )\n now = datetime.utcnow()\n cert = (\n x509.CertificateBuilder()\n .subject_name(subject)\n .issuer_name(subject)\n .public_key(key.public_key())\n .serial_number(x509.random_serial_number())\n .not_valid_before(now)\n .not_valid_after(now)\n .sign(key, hashes.SHA256())\n )\n return (key, cert)\n\n # Make some certificates with distinct names.\n a_name = \"A\" * 20\n b_name = \"B\" * 20\n c_name = \"C\" * 20\n a_key, a_cert = make_cert(a_name)\n _, b_cert = make_cert(b_name)\n _, c_cert = make_cert(c_name)\n\n # Bundle them in a PKCS#12 file in order A, B, C.\n p12 = serialize_key_and_certificates(\n b\"p12\", a_key, a_cert, [b_cert, c_cert], serialization.NoEncryption()\n )\n\n # Parse them out. The API should report them in the same order.\n (key, cert, certs) = load_key_and_certificates(p12, None)\n assert cert == a_cert\n assert certs == [b_cert, c_cert]\n\n # The ordering in the PKCS#12 file itself should also match.\n a_idx = p12.index(a_name.encode(\"utf-8\"))\n b_idx = p12.index(b_name.encode(\"utf-8\"))\n c_idx = p12.index(c_name.encode(\"utf-8\"))\n\n assert a_idx < b_idx < c_idx",
"def unpack_keys_from_xfer(key_pack_hex: hex,\n path=paths.nacl_keys,\n *args,\n **kwargs):\n global public_box\n\n try:\n key_dict = public_box.decrypt(key_pack_hex)\n key_dict = json.loads(key_dict)\n\n aes_key = key_dict[\"aes\"]\n AES256Cipher().write_key(aes_key.encode())\n\n fernet_key = key_dict[\"fernet\"]\n FernetCipher().write_key(fernet_key.encode())\n\n chacha_key = key_dict[\"chacha\"]\n XChaCha20Poly1305.write_key(Base64Encoder.decode(chacha_key))\n\n except:\n print(sysMsgList.keysUnpackFail)",
"def get_cert_content(certificate):\n cert_object = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)\n cert_content = crypto.dump_certificate(crypto.FILETYPE_TEXT, cert_object)\n return cert_content",
"def info_from_args(args):\n return CertInfo(\n subject=parse_dn(args.subject),\n usage=parse_list(args.usage),\n alt_names=parse_list(args.san),\n ocsp_nocheck=args.ocsp_nocheck,\n ocsp_must_staple=args.ocsp_must_staple,\n ocsp_must_staple_v2=args.ocsp_must_staple_v2,\n ocsp_urls=parse_list(args.ocsp_urls),\n crl_urls=parse_list(args.crl_urls),\n issuer_urls=parse_list(args.issuer_urls),\n permit_subtrees=parse_list(args.permit_subtrees),\n exclude_subtrees=parse_list(args.exclude_subtrees),\n ca=args.CA,\n path_length=args.path_length)",
"def get_certinfo(doc):\n\n #set a two second default timeout to recieve a cert\n socket.setdefaulttimeout(2)\n doc['ssl'] = {} \n\n try:\n cert = ssl.get_server_certificate((doc['hostname'], 443))\n #sometimes certs come back as unicode so cast to str() aka ascii\n cert = M2Crypto.X509.load_cert_string(str(cert))\n\n except:\n syslog.syslog('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n print('[*] Failed to get ssl certificate from %s' % doc['hostname'])\n #lets remove the ssl key and return the doc untouched\n doc.pop('ssl')\n return doc\n\n\n #get creation date\n doc['ssl']['created'] = cert.get_not_before().get_datetime().isoformat()\n #get not valid after, aka expiration data\n doc['ssl']['expire'] = cert.get_not_after().get_datetime().isoformat()\n #get issuer information\n doc['ssl']['issuer'] = cert.get_issuer().as_text()\n #get subject information\n doc['ssl']['subject'] = cert.get_subject().as_text()\n #get keysize, size() returns in bytes, so we multiply * 8 to get the number of bits\n doc['ssl']['keysize'] = cert.get_pubkey().size() * 8\n #get cert fingerprint for comparison\n doc['ssl']['fingerprint'] = cert.get_fingerprint()\n\n return doc",
"def asn1_loads(asn1_str):\n\n # ASN.1 grammar\n identifier = pp.Word(pp.alphas + \"_\")\n assign = pp.Literal(\"::=\")\n # typedef = identifier.setName(\"typeref\") + assign + identifier.setName(\"basetype\")\n comment1 = pp.Literal(\"#\") + pp.originalTextFor(pp.SkipTo(pp.LineEnd()))\n # typelist = pp.OneOrMore(typedef)\n meta1 = pp.LineStart() + identifier + pp.Literal(\":\") + pp.SkipTo(pp.LineEnd()).setDebug()\n meta2 = pp.LineStart() + pp.White() + pp.SkipTo(pp.LineEnd()).setDebug()\n metaval = meta1 + pp.ZeroOrMore(meta2)\n # metalist = pp.ZeroOrMore(comment1) + pp.Literal(\"/*\") + pp.OneOrMore(metaval) + pp.Literal(\"*/\")\n metalist = pp.SkipTo(pp.Literal(\"/*\")).setDebug() + pp.Literal(\"/*\") + pp.OneOrMore(\n metaval).setDebug() + pp.Literal(\"*/\")\n\n asn1 = metalist.parseString(asn1_str, parseAll=False)\n print(asn1)\n jaen = {\"meta\": {}, \"types\": []}\n return jaen",
"def parse(obj):\n data = json.loads(obj)\n cryptopars = init_crypto_args(**data)\n return cryptopars\n # return cipherdata(cryptopars, **data)",
"def verify(self, ta):\n\n self.decode(CMS_object.verify(self, ta))\n return self.get_content()",
"def _check_certificate(public_cert_content, priv_key_content,\n domain=None, at_time=None):\n result = {}\n # Read the private key and public certificate\n try:\n priv_key = OpenSSL.crypto.load_privatekey(\n OpenSSL.crypto.FILETYPE_PEM, priv_key_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate_key': {\n 'state': 'invalid', 'detail': str(err)}})\n priv_key = None\n\n try:\n public_cert = OpenSSL.crypto.load_certificate(\n OpenSSL.crypto.FILETYPE_PEM, public_cert_content)\n except OpenSSL.crypto.Error as err:\n result.update({'ssl_certificate': {\n 'state': 'invalid', 'detail': str(err)}})\n public_cert = None\n\n if priv_key and public_cert:\n context = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_METHOD)\n context.use_privatekey(priv_key)\n context.use_certificate(public_cert)\n try:\n context.check_privatekey()\n except OpenSSL.SSL.Error:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate does not match private key.\"}})\n\n if result:\n raise RuntimeError(result)\n\n not_after = public_cert.get_notAfter()\n if not isinstance(not_after, six.string_types):\n not_after = not_after.decode('utf-8')\n not_after = datetime.datetime.strptime(not_after, \"%Y%m%d%H%M%SZ\")\n common_name = public_cert.get_subject().commonName\n alt_names = []\n for ext_idx in range(0, public_cert.get_extension_count()):\n extension = public_cert.get_extension(ext_idx)\n if extension.get_short_name().decode('utf-8') == 'subjectAltName':\n # data of the X509 extension, encoded as ASN.1\n decoded_alt_names, _ = asn1_decoder(\n extension.get_data(), asn1Spec=SubjectAltName())\n for alt in nat_encoder(decoded_alt_names):\n alt_name = alt['dNSName'].decode('utf-8')\n if alt_name != common_name:\n alt_names += [alt_name]\n if domain:\n found = False\n for alt_name in [common_name] + alt_names:\n regex = alt_name.replace('.', r'\\.').replace('*', r'.*') + '$'\n if re.match(regex, domain) or alt_name == domain:\n found = True\n break\n if not found:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"domain name (%s) does not match common or alt names\"\\\n \" present in certificate (%s, %s).\" % (\n domain, common_name, ','.join(alt_names))}})\n if at_time:\n if not_after <= at_time:\n result.update({'ssl_certificate': {'state': 'invalid',\n 'detail': \"certificate is only valid until %s.\" % not_after}})\n\n if result:\n raise RuntimeError(result)\n\n result.update({'ssl_certificate': {\n 'common_name': common_name,\n 'alt_names': alt_names,\n 'state': result.get('ssl_certificate', {}).get('state', 'valid'),\n 'issuer': public_cert.get_issuer().organizationName,\n 'ends_at': not_after.isoformat()}})\n return result",
"def parsePrivateKey(s):\r\n return parsePEMKey(s, private=True)",
"def _ParseCertificateArguments(client, args):\n self_managed = None\n managed = None\n certificate_type = None\n if args.certificate:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.SELF_MANAGED\n certificate = files.ReadFileContents(args.certificate)\n private_key = files.ReadFileContents(args.private_key)\n self_managed = client.messages.SslCertificateSelfManagedSslCertificate(\n certificate=certificate, privateKey=private_key)\n if args.domains:\n certificate_type = \\\n client.messages.SslCertificate.TypeValueValuesEnum.MANAGED\n managed = client.messages.SslCertificateManagedSslCertificate(\n domains=args.domains)\n return certificate_type, self_managed, managed",
"def test_rsa_ca(self):\n key = c.KEY_RSA\n usage = [\n c.KU_DIGITALSIGNATURE,\n c.KU_NONREPUDIATION,\n c.KU_KEYENCIPHERMENT,\n c.KU_DATAENCIPHERMENT,\n c.KU_KEYCERTSIGN,\n c.KU_CRLSIGN,\n ]\n self.assertTrue(utils.check_key_usage(key, usage, True))",
"def check_valid_request_ca(self):\n\n self.check_valid_request_common()\n\n alg = self.get_POW().getSignatureAlgorithm()\n bc = self.get_POW().getBasicConstraints()\n eku = self.get_POW().getEKU()\n sia = self.get_POW().getSIA()\n\n if alg != rpki.oids.sha256WithRSAEncryption:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 has bad signature algorithm for CA: %s\" % alg)\n\n if bc is None or not bc[0] or bc[1] is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA bad basicConstraints\")\n\n if eku is not None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA EKU not allowed\")\n\n if sia is None:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA missing\")\n\n caRepository, rpkiManifest, signedObject, rpkiNotify = sia\n\n logger.debug(\"check_valid_request_ca(): sia: %r\", sia)\n\n if signedObject:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must not have id-ad-signedObject\")\n\n if not caRepository:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-caRepository\")\n\n if not any(uri.startswith(\"rsync://\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and not uri.endswith(\"/\") for uri in caRepository):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-caRepository does not end with slash\")\n\n if not rpkiManifest:\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA must have id-ad-rpkiManifest\")\n\n if not any(uri.startswith(\"rsync://\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest contains no rsync URIs\")\n\n if any(uri.startswith(\"rsync://\") and uri.endswith(\"/\") for uri in rpkiManifest):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiManifest ends with slash\")\n\n if any(not uri.startswith(\"http://\") and not uri.startswith(\"https://\") for uri in rpkiNotify):\n raise rpki.exceptions.BadPKCS10(\"PKCS #10 CA SIA id-ad-rpkiNotify neither HTTP nor HTTPS\")",
"def ver_dec_content(parts, sign_key=None, enc_key=None, sign_alg=\"SHA256\"):\n\n if parts is None:\n return None\n elif len(parts) == 3:\n # verify the cookie signature\n timestamp, load, b64_mac = parts\n mac = base64.b64decode(b64_mac)\n verifier = HMACSigner(algorithm=sign_alg)\n if verifier.verify(\n load.encode(\"utf-8\") + timestamp.encode(\"utf-8\"), mac, sign_key.key\n ):\n return load, timestamp\n else:\n raise VerificationError()\n elif len(parts) == 4:\n b_timestamp = parts[0]\n iv = base64.b64decode(parts[1])\n ciphertext = base64.b64decode(parts[2])\n tag = base64.b64decode(parts[3])\n\n decrypter = AES_GCMEncrypter(key=enc_key.key)\n try:\n msg = decrypter.decrypt(ciphertext, iv, tag=tag)\n except InvalidTag:\n return None\n\n p = lv_unpack(msg.decode(\"utf-8\"))\n load = p[0]\n timestamp = p[1]\n if len(p) == 3:\n verifier = HMACSigner(algorithm=sign_alg)\n if verifier.verify(\n load.encode(\"utf-8\") + timestamp.encode(\"utf-8\"),\n base64.b64decode(p[2]),\n sign_key.key,\n ):\n return load, timestamp\n else:\n return load, timestamp\n return None",
"def extract (msgfile, key):\n m = email.message_from_file(msgfile)\n From, To, Subject, Date = caption(m)\n #Text, Html, Files, Parts = pullout(m, key)\n Text = Text.strip(); Html = Html.strip()\n msg = {\"subject\": Subject, \"from\": From, \"to\": To, \"date\": Date,\n \"text\": Text, \"html\": Html, \"parts\": Parts}\n if Files: msg[\"files\"] = Files\n return msg",
"def fetch_cert(source, entry, s3_client):\n if source == \"s3\":\n bucket_and_key = parse_s3_url(entry)\n logger.info(\"...reading s3 source = {}\".format(bucket_and_key))\n pem_cert = s3_client.get_object(\n Bucket=bucket_and_key[\"bucket\"], Key=bucket_and_key[\"key\"]\n )\n pem_cert_body = pem_cert[\"Body\"].read()\n elif source == \"memory\":\n logger.info(\"...reading from memory\")\n pem_cert_body = entry\n else:\n raise ValueError(\n \"Invalid cert entry type {}, \" \"must be one of s3, memory\".format(source)\n )\n\n # Python3 will return a byte string, Python2 will return a string\n if type(pem_cert_body) == bytes:\n pem_cert_body = pem_cert_body.decode(\"utf-8\")\n\n return pem_cert_body"
] | [
"0.6905935",
"0.519228",
"0.5144184",
"0.5090635",
"0.4969031",
"0.49095032",
"0.48951414",
"0.48807597",
"0.4877804",
"0.48451734",
"0.4826676",
"0.48249158",
"0.48138803",
"0.4775471",
"0.47672594",
"0.4758738",
"0.47264582",
"0.47241712",
"0.46494445",
"0.4630516",
"0.46156985",
"0.45940387",
"0.4527909",
"0.44569722",
"0.44397488",
"0.44365314",
"0.44193998",
"0.43797928",
"0.4379079",
"0.43483678"
] | 0.6486718 | 1 |
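The negative snippets listed above repeatedly split a PEM bundle (or decrypted P7M payload) into individual certificates by scanning for the BEGIN marker and loading each match. The sketch below is not part of any record; it is a minimal, self-contained illustration of that scan-and-load pattern, assuming a recent `cryptography` package is installed, and the `a.example` / `b.example` common names are placeholders invented for the demo.

```python
from datetime import datetime, timedelta

from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.x509.oid import NameOID


def make_self_signed(common_name):
    # Hypothetical helper: build a throwaway self-signed certificate for the demo.
    key = ec.generate_private_key(ec.SECP256R1())
    name = x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, common_name)])
    now = datetime.utcnow()
    return (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + timedelta(days=1))
        .sign(key, hashes.SHA256())
    )


# Concatenate two certificates into one PEM bundle, then scan for the BEGIN
# marker and load each certificate from that offset onward.
bundle = b"".join(
    cert.public_bytes(serialization.Encoding.PEM)
    for cert in (make_self_signed(u"a.example"), make_self_signed(u"b.example"))
)

MARKER = b"-----BEGIN CERTIFICATE-----"
certs = []
start = 0
while True:
    index = bundle.find(MARKER, start)
    if index == -1:
        break
    certs.append(x509.load_pem_x509_certificate(bundle[index:]))
    start = index + len(MARKER)

assert len(certs) == 2
```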
Process and return selected confounds from the confounds file | def _select_confounds(confounds_file, selected_confounds):
import pandas as pd
import numpy as np
confounds_df = pd.read_csv(confounds_file, sep='\t', na_values='n/a')
# fill the first value of FramewiseDisplacement with the mean.
if 'FramewiseDisplacement' in selected_confounds:
confounds_df['FramewiseDisplacement'] = confounds_df['FramewiseDisplacement'].fillna(
np.mean(confounds_df['FramewiseDisplacement']))
desired_confounds = confounds_df[selected_confounds]
return desired_confounds | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _select_confounds(confounds_file, selected_confounds):\n import pandas as pd\n import numpy as np\n import re\n\n confounds_df = pd.read_csv(confounds_file, sep='\\t', na_values='n/a')\n # regular expression to capture confounds specified at the command line\n confound_expr = re.compile(r\"|\".join(selected_confounds))\n expanded_confounds = list(filter(confound_expr.fullmatch, confounds_df.columns))\n imputables = ('framewise_displacement', 'std_dvars', 'dvars', '.*derivative1.*')\n\n # regular expression to capture all imputable confounds\n impute_expr = re.compile(r\"|\".join(imputables))\n expanded_imputables = list(filter(impute_expr.fullmatch, expanded_confounds))\n for imputable in expanded_imputables:\n vals = confounds_df[imputable].values\n if not np.isnan(vals[0]):\n continue\n # Impute the mean non-zero, non-NaN value\n confounds_df[imputable][0] = np.nanmean(vals[vals != 0])\n\n desired_confounds = confounds_df[expanded_confounds]\n # check to see if there are any remaining nans\n if desired_confounds.isna().values.any():\n msg = \"The selected confounds contain nans: {conf}\".format(conf=expanded_confounds)\n raise ValueError(msg)\n return desired_confounds",
"def read_conect(self):\n self.conect_section = []\n if not self.lines:\n self.read_lines()\n for line in self.lines:\n if \"CONECT\" in line[0:6]:\n self.conect_section.append(line)",
"def get_convos():\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(b' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(b', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos",
"def _read_conll(cls, input_file):\n #def read_conll(input_file):\n sents = []\n sent, labels = [], []\n for line in open(input_file):\n if line.startswith(\"# sent_id\"):\n current_id = line.strip().split(\" = \")[1]\n elif line.strip() == \"\":\n if len(sent) > 0:\n sents.append((current_id, sent, labels))\n sent, labels = [], []\n else:\n token, label = line.strip().split(\"\\t\")\n sent.append(token)\n labels.append(label)\n return sents",
"def main():\n processSetOfCerFiles(sys.argv[1:])",
"def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec",
"def semcor2conc(args):\r\n input_files = list_files(*args.input_files)\r\n types = list(args.types)\r\n output_file = args.output_file or output_default / '{}_conc.csv'.format('_'.join(types))\r\n output_file = Path(output_file)\r\n left_context = args.left\r\n right_context = args.right\r\n separator = args.separator\r\n filter_pos = args.pos\r\n kind_id = args.kind_id\r\n with output_file.open('w') as file:\r\n x = 'last\\tnext\\tlemma' if args.add_closest else 'lemma'\r\n file.write('\\t'.join(['concordance', 'file', 'token_id', 'left', 'wordform', 'right', x, 'pos', 'sense_key\\n']))\r\n for input_file in input_files:\r\n corpus_file = CorpusFile(input_file)\r\n tokenlist = list(generate_tokenlist(corpus_file.text))\r\n chosen_words = [index for (index, token) in enumerate(tokenlist) if token.lemma in types]\r\n for word in chosen_words:\r\n node = tokenlist[word]\r\n pos = node.pos\r\n if filter_pos and not re.match(r'{}'.format([x for x in filter_pos]), pos):\r\n continue\r\n if kind_id == 'lemma_pos':\r\n wordtype = '/'.join([node.lemma, node.pos])\r\n elif kind_id == 'wordform':\r\n wordtype = node.wordform\r\n else:\r\n wordtype = node.lemma\r\n token_id = '/'.join([wordtype, corpus_file.shortname, str(word + 1)])\r\n left, right = generate_context(tokenlist, word, left_context, right_context, separator, len(tokenlist))\r\n if args.add_closest:\r\n last = tokenlist[word-1].wordform\r\n following = tokenlist[word+1].wordform\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, last, following, node.lemma, pos, node.sense_key or 'NA']\r\n else:\r\n line = [corpus_file.concordance, corpus_file.shortname, token_id, left, node.wordform, right, node.lemma, pos, node.sense_key or 'NA']\r\n file.write('\\t'.join(line) + '\\n')\r\n print('File \"{}\" processed.'.format(input_file.stem))",
"def read_confounds(filename, confounds):\n df_confounds = pandas.read_csv(filename, sep='\\t', usecols=confounds)\n return df_confounds",
"def _load_confounds_main(\n confounds_raw, strategy=[\"minimal\"], n_components=0.95, motion_model=\"6params\"\n):\n\n # Convert tsv file to pandas dataframe\n if not isinstance(confounds_raw, pd.DataFrame):\n confounds_raw = pd.read_csv(confounds_raw, delimiter=\"\\t\", encoding=\"utf-8\")\n\n # Add chosen confounds based on strategy to dataframe\n confounds_of_interest = set()\n confounds_out = pd.DataFrame()\n\n for strat in strategy:\n if strat in confound_dict.keys():\n\n confounds_of_interest |= set(_confound_strat(strat, confounds_raw))\n else:\n confounds_of_interest.add(strat)\n\n # Remove motion confounds and concatenate columns to confounds_out\n non_motion_confounds = [\n conf\n for conf in confounds_of_interest\n if ((\"rot\" not in conf) and (\"trans\" not in conf))\n ]\n\n confounds_out = pd.concat(\n (confounds_out, confounds_raw[list(non_motion_confounds)]), axis=1\n )\n\n # Apply PCA on motion confounds\n motion_bool = set(motion_6params) & confounds_of_interest\n if motion_bool:\n confounds_out = _pca_motion(\n confounds_out, confounds_raw, n_components, motion_model,\n )\n\n return confounds_out",
"def select_confounds(subject_id, run_num):\n confounds_dir = f'/data/sub-%02d/func/' % int(subject_id)\n confounds_file = confounds_dir+f'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (int(subject_id), int(run_num))\n conf_df = pd.read_csv(confounds_file, sep='\\t')\n return conf_df",
"def fetch_corpous_from_file(filepath):\n f = open(filepath, 'r')\n corpus_text = f.read() \n corpus_sentence_list = corpus_text.lower().split('.')\n corpus_list_sent_processed = [remove_special_chars(item) for item in corpus_sentence_list if len(item)>1] \n return corpus_list_sent_processed",
"def handle(self):\n\n # We generate the status file with the catched status.\n Generate(self.catched, \"SYNTAX\").status_file()\n\n # We return the parsed status.\n return self.catched",
"def select_confounds(subject_id, run_num):\n confounds_dir = f'/data/sub-%02d/func/' % int(subject_id)\n confounds_file = confounds_dir+f'sub-%02d_task-tsl_run-%d_desc-confounds_timeseries.tsv' % (int(subject_id), int(run_num))\n conf_df = pd.read_csv(confounds_file, sep='\\t')\n return conf_df",
"def cat(config, input):\n for file in input:\n while True:\n output = file.read()\n if not output:\n break\n m = SearchMatches(file, output, config.regex, config.color, config.underline)\n m.print_match_lines()",
"def read_conll_file(file_name):\n data = []\n current_words = []\n current_tags = []\n\n for line in codecs.open(file_name, encoding='utf-8'):\n line = line.strip()\n \n if line:\n if line[0] == '#':\n continue # skip comments\n tok = line.split('\\t')\n if '-' in tok[0] or '.' in tok[0]:\n continue # skip special tokenized words\n word = tok[1]\n tag = tok[3]\n \n current_words.append(word)\n current_tags.append(tag)\n else:\n if current_words: # skip empty lines\n data.append((current_words, current_tags))\n current_words = []\n current_tags = []\n\n # check for last one\n if current_tags != [] and not raw:\n data.append((current_words, current_tags))\n return data",
"def make_conc_obj_from_conclines(conc_results):\n from corpkit.interrogation import Concordance\n all_conc_lines = []\n for sc_name, resu in sorted(conc_results.items()):\n if only_unique:\n unique_results = uniquify(resu)\n else:\n unique_results = resu\n #make into series\n if PYTHON_VERSION == 2:\n pindex = 'c f s l m r'.encode('utf-8').split()\n else:\n pindex = 'c f s l m r'.split()\n for fname, spkr, start, word, end in unique_results:\n #spkr = str(spkr, errors = 'ignore')\n fname = os.path.basename(fname)\n ser = [sc_name, fname, spkr, start, word, end]\n all_conc_lines.append(Series(ser, index=pindex))\n\n if random:\n from random import shuffle\n shuffle(all_conc_lines)\n\n try:\n conc_df = pd.concat(all_conc_lines, axis=1).T\n if all(x == '' for x in list(conc_df['s'].values)):\n conc_df.drop('s', axis=1, inplace=True)\n \n if show_ngram or show_collocates:\n if not language_model:\n counted = Counter(conc_df['m'])\n indices = [l for l in list(conc_df.index) if counted[conc_df.ix[l]['m']] > 1] \n conc_df = conc_df.ix[indices]\n conc_df = conc_df.reset_index(drop=True)\n\n locs['corpus'] = corpus.name\n conc_df = Concordance(conc_df)\n try:\n conc_df.query = locs\n except AttributeError:\n pass\n return conc_df\n\n except ValueError:\n return",
"def get_convos():\n # returns array of arrays with line data from movie_conversations.txt\n # ex. convos = [['L194', 'L195', 'L196'], ['L198', L'199']]\n file_path = os.path.join(config.DATA_PATH, config.CONVO_FILE)\n convos = []\n with open(file_path, 'rb') as f:\n for line in f.readlines():\n parts = line.split(' +++$+++ ')\n if len(parts) == 4:\n convo = []\n for line in parts[3][1:-2].split(', '):\n convo.append(line[1:-1])\n convos.append(convo)\n\n return convos",
"def main():\n # call open_file() to get file pointer \n fd = open_file()\n # call fill completion to get dict, then close the openned file\n full_set = create_dict(fd)\n wrds = find_words(full_set)\n print(wrds)\n fd.close()\n # ask for a prefix in while loop",
"def gather_candidates(self, context):\n candidates = []\n\n with open(context['data_file'], 'r') as fp:\n try:\n config = load(fp)\n except JSONDecodeError:\n err_string = 'Decode error for' + context['data_file']\n error(self.vim, err_string)\n config = []\n\n for obj in config:\n candidates.append({\n 'word': obj['option'],\n '__option': obj['option'],\n '__shortname': obj['shortname'],\n '__description': obj['description'],\n 'abbr': f\"{obj['option']:<15}│{obj['shortname']:<10}│{obj['description']:<15}\",\n })\n\n return candidates",
"def load_confounds(\n confounds_raw, strategy=[\"minimal\"], n_components=0.95, motion_model=\"6params\"\n):\n if type(confounds_raw) == str:\n confounds_out = _load_confounds_helper(\n confounds_raw,\n strategy=strategy,\n n_components=n_components,\n motion_model=motion_model,\n )\n\n elif type(confounds_raw) == list:\n confounds_out = []\n for file in confounds_raw:\n confounds_out.append(\n _load_confounds_helper(\n file,\n strategy=strategy,\n n_components=n_components,\n motion_model=motion_model,\n )\n )\n\n else:\n confounds_out = 0\n raise ValueError(\"Invalid input type\")\n\n return confounds_out",
"def _parse_relevant_lines(cls, conf_file_path):\n # Make a dictionary with the keys of find_words corresponding with\n # empty array as a place holder.\n relevant_lines = dict([(word, []) for word in cls.FIND_WORDS])\n # Now locate the relevant lines in this file and keep the found\n # pattern matches.\n with open(conf_file_path, 'r') as config:\n for line in config:\n # Strip whitespaces\n line = line.strip(\" \\t\")\n # Skip comment lines..\n if line.startswith('#'):\n continue\n for word, pattern in cls.FIND_WORDS.items():\n if \"{} \".format(word) not in line:\n continue\n matches = pattern.findall(line)\n if matches:\n # We only need the first capturing group.\n matches = [match[0].strip(\" \\t\") for match in matches]\n # We will only need the matched strings later on.\n relevant_lines[word] += matches\n return relevant_lines",
"def readFileToCorpus(f):\n if os.path.isfile(f):\n file = open(f, \"r\") # open the input file in read-only mode\n i = 0 # this is just a counter to keep track of the sentence numbers\n corpus = [] # this will become a list of sentences\n print(\"Reading file \", f)\n for line in file:\n i += 1\n sentence = line.split() # split the line into a list of words\n #append this lis as an element to the list of sentences\n corpus.append(sentence)\n if i % 1000 == 0:\n #print a status message: str(i) turns int i into a string\n #so we can concatenate it\n sys.stderr.write(\"Reading sentence \" + str(i) + \"\\n\")\n #endif\n #endfor\n return corpus\n else:\n #ideally we would throw an exception here, but this will suffice\n print(\"Error: corpus file \", f, \" does not exist\")\n sys.exit() # exit the script\n #endif",
"def extract_programs(outputf):\t\n programs = []\n with open(outputf,'r') as f:\n\t combo_lines = f.readlines()\n for combo_line in combo_lines:\n combo = combo_line.split(' ',1)[1]\n\t programs.append(combo)\n return programs",
"def parsec(formatted_file, pattern_tree):\n pattern_path = []\n result_tree = {}\n result_path = []\n for line in formatted_file:\n search(line, pattern_tree, pattern_path, result_tree, result_path)\n return result_tree",
"def _gather_confounds(\n signals=None,\n dvars=None,\n std_dvars=None,\n fdisp=None,\n rmsd=None,\n motion=None,\n newpath=None,\n):\n all_files = []\n confounds_list = []\n for confound, name in (\n (signals, \"Global signals\"),\n (std_dvars, \"Standardized DVARS\"),\n (dvars, \"DVARS\"),\n (fdisp, \"Framewise displacement\"),\n (rmsd, \"RMSD\"),\n (motion, \"Motion parameters\"),\n ):\n if confound is not None and isdefined(confound):\n confounds_list.append(name)\n if os.path.exists(confound) and os.stat(confound).st_size > 0:\n all_files.append(confound)\n\n confounds_data = pd.DataFrame()\n for file_name in all_files: # assumes they all have headings already\n new = pd.read_csv(file_name, sep=\"\\t\")\n for column_name in new.columns:\n new.rename(\n columns={column_name: _camel_to_snake(_less_breakable(column_name))}, inplace=True\n )\n\n confounds_data, new = _adjust_indices(confounds_data, new)\n confounds_data = pd.concat((confounds_data, new), axis=1)\n\n if newpath is None:\n newpath = os.getcwd()\n\n combined_out = os.path.join(newpath, \"confounds.tsv\")\n confounds_data.to_csv(combined_out, sep=\"\\t\", index=False, na_rep=\"n/a\")\n\n return combined_out, confounds_list",
"def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences",
"def getComicsListFromFile(filename):\n h = open(filename)\n contents = \"\\n\".join(h.readlines())\n expr = re.compile(\"([a-z0-9]+)\")\n return expr.findall(contents)",
"def collect_confs(self):\n\n sim_confs = []\n failed_sims = []\n solfile = self.gconf['General']['solution_file']\n # Find the data files and instantiate Simulation objects\n base = os.path.expandvars(self.gconf['General']['base_dir'])\n self.log.info(base)\n for root, dirs, files in os.walk(base):\n conf_path = os.path.join(root, 'sim_conf.yml')\n if 'sim_conf.yml' in files and solfile in files:\n self.log.info('Gather sim at %s', root)\n # sim_obj = Simulation(Config(conf_path))\n conf = Config(conf_path)\n # sim_obj.conf.expand_vars()\n sim_confs.append(conf)\n elif 'sim_conf.yml' in files:\n # sim_obj = Simulation(Config(conf_path))\n conf = Config(conf_path)\n self.log.error('Sim %s is missing its data file',\n conf['General']['sim_dir'])\n failed_sims.append(conf)\n self.sim_confs = sim_confs\n self.failed_sims = failed_sims\n if not sim_confs:\n self.log.error('Unable to find any successful simulations')\n raise RuntimeError('Unable to find any successful simulations')\n return sim_confs, failed_sims",
"def _FindTarget(self):\n ret = []\n for filename in self._Walk(self._main_directory, \".tex\"):\n skip, cache = self._CacheDataAndSkip(filename)\n if skip:\n ret.extend(cache)\n continue\n\n resp = []\n for i, line in enumerate(codecs.open(filename, 'r', 'utf-8')):\n line = line.rstrip()\n match = re.search(self.collect_regex, line)\n if match is not None:\n lid = re.sub(\".*\" + self.collect_regex + \".*\", r\"\\1\", line)\n if not lid in ret and not lid in resp:\n resp.append( lid )\n #TODO- make it an option if we want gotos for\n #this completion\n self._goto_labels[lid] = (filename, i+1, match.start(1))\n\n self._cached_data[filename] = resp\n ret.extend(resp)\n \"\"\"\n we moved the building of completes to here so we can\n share a cache between square and curly brackets\n \"\"\"\n temp = []\n for i in ret:\n tempo = self.BuildOurCompletes(i)\n temp.append( tempo )\n return temp",
"def load_conll(path, exclude=False, file_encoding='utf-8'):\n corpus = []\n\n with open(path) as f:\n sent = []\n for line in f:\n es = line.rstrip().split()\n if len(es) > 1:\n word = es[0].decode(file_encoding).lower()\n# word = RE_NUM.sub(u'0', word)\n tag = es[1].decode(file_encoding)\n syn = es[2].decode(file_encoding)\n ne = es[3].decode(file_encoding) # you can ingore 1-3 for n2n SRL task, but we parse here just in case\n prd = es[4].decode(file_encoding)#Target\n prop = []\n\n if len(es) > 5:\n prop = es[5:]\n sent.append((word, tag, syn, ne, prd, prop))\n else:\n if exclude and (len(sent[0][5]) == 0 or len(sent) < 2):\n pass\n else:\n corpus.append(sent)\n sent = []\n\n if sent:\n corpus.append(sent)\n\n return corpus"
] | [
"0.65061325",
"0.5781993",
"0.5781708",
"0.56171685",
"0.55821574",
"0.55591077",
"0.55106205",
"0.5485491",
"0.5484373",
"0.5472426",
"0.54602283",
"0.54551107",
"0.5445359",
"0.5434916",
"0.5391505",
"0.5335677",
"0.53314865",
"0.53281003",
"0.5295112",
"0.52804834",
"0.52786887",
"0.5269188",
"0.5259035",
"0.52532804",
"0.52441585",
"0.5222865",
"0.5203549",
"0.5193858",
"0.51710933",
"0.51574117"
] | 0.6170222 | 1 |
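The record above selects confound regressors from an fMRIPrep-style confounds TSV and imputes the missing leading FramewiseDisplacement value with the column mean. Below is a small, self-contained sketch of the same read/impute/subset steps against a synthetic three-row table; it is not part of any record, and the file contents and column names are invented for the illustration.

```python
import os
import tempfile

import numpy as np
import pandas as pd

# Synthetic confounds table: the first FramewiseDisplacement entry is 'n/a',
# mirroring how the first volume is typically reported.
rows = [
    {"FramewiseDisplacement": "n/a", "X": 0.01, "Y": -0.02},
    {"FramewiseDisplacement": 0.10, "X": 0.02, "Y": -0.01},
    {"FramewiseDisplacement": 0.05, "X": 0.00, "Y": 0.03},
]
confounds_file = os.path.join(tempfile.mkdtemp(), "confounds.tsv")
pd.DataFrame(rows).to_csv(confounds_file, sep="\t", index=False)

# Read, impute the missing FramewiseDisplacement value with the column mean,
# and keep only the requested columns -- the same steps as the record above.
selected_confounds = ["FramewiseDisplacement", "X"]
confounds_df = pd.read_csv(confounds_file, sep="\t", na_values="n/a")
confounds_df["FramewiseDisplacement"] = confounds_df["FramewiseDisplacement"].fillna(
    np.mean(confounds_df["FramewiseDisplacement"]))
print(confounds_df[selected_confounds])
```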
Return a cached copy of TestShib's metadata with a cacheDuration attribute | def cache_duration_metadata_callback(_request, _uri, headers):
return (200, headers, self.read_data_file('testshib_metadata_with_cache_duration.xml')) # lint-amnesty, pylint: disable=no-member | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_metadata(self) -> Metadata:\n manifest = self._get_manifest()\n\n return Metadata(**manifest[\"metadata\"])",
"def get_metadata(self):\n return copy.copy(self.metadata)",
"def cache_data(self):\n # Initialize key variables\n result = self.data['cache_data']\n return result",
"def test_cache_datastore_manifests(self, cache_audio: bool):\n # Data setup\n random_seed = 42\n sample_rate = 16000\n num_examples = 10\n num_manifests = 2\n data_duration = 1.0\n\n # Generate random signals\n _rng = np.random.default_rng(seed=random_seed)\n\n # Input and target signals have the same duration\n data_duration_samples = int(data_duration * sample_rate)\n\n with tempfile.TemporaryDirectory() as test_dir:\n test_store_dir = os.path.join(test_dir, 'store')\n os.mkdir(test_store_dir)\n\n # Prepare metadata and audio files\n manifest_filepaths = []\n audio_files = []\n for m in range(num_manifests):\n manifest_dir = os.path.join(test_store_dir, f'manifest_{m}')\n os.mkdir(manifest_dir)\n manifest_filepath = os.path.join(manifest_dir, 'manifest.json')\n\n metadata = []\n data = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples, num_examples))\n for n in range(num_examples):\n audio_filepath = f'manifest_{m}_audio_{n:02d}.wav'\n audio_file = os.path.join(manifest_dir, audio_filepath)\n # Write audio file\n sf.write(audio_file, data[:, n], sample_rate, 'float')\n # Update metadata\n metadata.append(\n {\n 'audio_filepath': audio_filepath,\n 'duration': data_duration,\n 'text': f'text for example {n:02d}',\n }\n )\n # Update audio files\n audio_files.append(audio_file)\n\n # Save manifest\n write_manifest(manifest_filepath, metadata)\n manifest_filepaths.append(manifest_filepath)\n\n # Cache location\n test_cache_dir = os.path.join(test_dir, 'cache')\n\n # Instead of using AIS, copy object from store dir to cache dir\n def fake_get(self):\n # Object path relative to store path\n object_path = os.path.relpath(self.store_path, start=test_store_dir)\n # Copy to fake local path\n self._local_path = os.path.join(test_cache_dir, object_path)\n os.makedirs(os.path.dirname(self.local_path), exist_ok=True)\n shutil.copy(self.store_path, self.local_path)\n # Return path as in the original get\n return self.local_path\n\n with mock.patch(\n 'nemo.collections.asr.data.audio_to_text.is_datastore_path', lambda x: True\n ), mock.patch.object(DataStoreObject, 'get', fake_get):\n # Use a single worker for this test to avoid failure with mock & multiprocessing (#5607)\n cache_datastore_manifests(manifest_filepaths, cache_audio=cache_audio, num_workers=1)\n\n # Manifests need to be compared\n store_files_to_compare = manifest_filepaths\n if cache_audio:\n # Audio needs to be compared\n store_files_to_compare += audio_files\n\n # Compare files\n for f_store in store_files_to_compare:\n f_cache = os.path.join(test_cache_dir, os.path.relpath(f_store, test_store_dir))\n assert filecmp.cmp(f_store, f_cache, shallow=False), f'Files {f_store} and {f_cache} do not match.'",
"def generate_statistics():\r\n statistics = cache.get('statistics')\r\n if statistics is None:\r\n statistics = {}\r\n statistics['nr_hashtags'] = ('Number of Hashtags',\r\n get_number_hashtags())\r\n statistics['nr_tokens'] = ('Number of Tokens', get_token_count())\r\n statistics['media_storage_size'] = ('Storage Folder Size (MB)',\r\n str(get_folder_size(\r\n cfg['media_storage'])))\r\n\r\n cache.set('statistics', statistics,\r\n cfg['flask_cache_timeout'] * 60)\r\n\r\n return statistics",
"def test_set_cache_timeout():\n my_accessor = RallyAccessor('uname', 'pword', 'base_url')\n my_accessor.set_cache_timeout('object_name', 10)\n\n assert_equal(my_accessor.cache_timeouts, {'object_name': 10})",
"def extract_metadata():\n\n create_output(ARGS.out)\n index = pre.pixz.read_index(ARGS.traffic)\n\n try:\n tmp = tempfile.mkdtemp(prefix=\"ictf2017_cache_\")\n print(\"Using temporary cache for extracted files at {}\".format(tmp))\n\n file_indexes = [i for i in range(len(index))\n if (i >= ARGS.start and i <= ARGS.stop)]\n\n # a wrapper which measures execution times and calculates eta\n eta = pre.timing.ETACalculator(len(file_indexes))\n\n for count, i in enumerate(file_indexes):\n print(\"\\nProcessing index {} from [{}, {}]\"\n .format(i, min(file_indexes), max(file_indexes)))\n\n def extract_read_append_remove():\n pcapfile = pre.pixz.extract_pcap(ARGS.traffic, index[i], tmp)\n metadata = pre.pcap.read(pcapfile)\n append_output(metadata, ARGS.out)\n os.remove(pcapfile)\n\n eta.execute(count, extract_read_append_remove)\n\n finally:\n shutil.rmtree(tmp)\n print(\"Cleaned up temporary cache {}\\n\\n\".format(tmp))",
"def cache(self):\n return self.payload.setdefault(self._CACHE_ATTRIBUTE, {})",
"def cache(self):\n return {'output': self.output, 'series': self.series}",
"def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0",
"def test_custom_cache(self, mocker):\n dev = qml.device(\"default.qubit\", wires=1)\n spy = mocker.spy(qml.interfaces, \"cache_execute\")\n\n def cost(a, cache):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n dev,\n gradient_fn=param_shift,\n cache=cache,\n )[0]\n\n custom_cache = {}\n params = jax.numpy.array([0.1, 0.2])\n jax.grad(cost)(params, cache=custom_cache)\n\n cache = spy.call_args[0][1]\n assert cache is custom_cache",
"def Metadata():\n def _CreateMetadata(unused_none):\n global _metadata\n if not _metadata:\n _metadata = _GCEMetadata()\n _metadata_lock.lock(function=_CreateMetadata, argument=None)\n _metadata_lock.unlock()\n return _metadata",
"def _init():\n cache_file = _get_buckets_cache_filename()\n exp = time.time() - S3_CACHE_EXPIRE\n\n # check mtime of the buckets files cache\n metadata = None\n try:\n if os.path.getmtime(cache_file) > exp:\n metadata = _read_buckets_cache_file(cache_file)\n except OSError:\n pass\n\n if metadata is None:\n # bucket files cache expired or does not exist\n metadata = _refresh_buckets_cache_file(cache_file)\n\n return metadata",
"def get_cache(self):\n return self.cache",
"def get_cache(self):\n return self._instance._cache[self.name]",
"def get_metadata(self):\n metadata = {}\n for k in self.metadata_keys:\n metadata[k] = copy.copy(getattr(self, k))\n return metadata",
"def metadata(self):\n return copy.copy(self._metadata)",
"def GetMetadata(self):\n return self.dict['meta']",
"def metadata(self):\n return copy.deepcopy(self._metadata)",
"def metadata_processor(self):\n counts = {key: int(value) for key, value in\n self.redis.hgetall(self.metadata_cache_key).iteritems()}\n\n counts['cached'] = len(self.tweet_cache)\n\n metadata = {'counts': counts}\n log.debug(metadata)\n\n if self.is_queuing:\n rqworker.enqueue(self.metadata_processor_fct, metadata)\n else:\n self.metadata_processor_fct(metadata)",
"def metadata(self, metadata):\n return Metadata(metadata)",
"def for_shard(self) -> \"BenchmarkMetadata\":\n new_metadata = BenchmarkMetadata.create(self.suite_name)\n kwargs = asdict(new_metadata)\n keep_new_fields = [\n \"argv\",\n \"user\",\n \"hostname\",\n \"ram\",\n \"cpu_name\",\n \"cpu_count\",\n \"cpu_frequency\",\n \"gpu_names\",\n ]\n keep_old_fields = [\"timestamp\", \"run_id\"]\n must_match_fields = [\n \"suite_name\",\n \"py_ver\",\n \"tf_ver\",\n \"np_ver\",\n \"git_branch_name\",\n \"git_commit\",\n ]\n for field in keep_old_fields:\n del kwargs[field]\n for field in must_match_fields:\n assert getattr(self, field) == kwargs[field], (\n f\"Field {field} must match between new and old metadata.\"\n f\" Found {getattr(self, field)} and {kwargs[field]}\"\n )\n del kwargs[field]\n assert keep_new_fields == list(kwargs)\n\n return replace(self, **kwargs)",
"def stats(self):\n if self.__cache:\n return {\n \"size\": self.__cache.currsize,\n \"maxsize\": self.__cache.maxsize,\n \"hits\": self._hits._value.get(),\n \"miss\": self._misses._value.get(),\n }\n else:\n return super(MemoryCache, self).stats()",
"def metadata(self) -> global___SummaryMetadata:",
"def cache(self) -> Optional[Sequence['outputs.SettingsPropertiesResponseCache']]:\n return pulumi.get(self, \"cache\")",
"def getMetadata(self):\n\n # keep variables local so they are not stored in memory\n meta, units = self.getDefaultMeta()\n\n # check each available file for header information\n # sequence is important since later calls overwrite earlier ones so if a header is present in \"psd\" and\n # \"data\", the value from \"data\" will be returned\n if self.ts:\n # get header data from file\n metaTmp, unitsTmp = self.ts.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set time series unit\n unitsTmp['timeseries'] = 'V'\n\n # update the dictionaries with newly found values\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.psd:\n metaTmp, unitsTmp = self.psd.getMetadata()\n\n # make sure we don't override important stuff that by accident has the same name\n # also, 'nSamples' and 'samplingRate' in reality refer to the underlying timeseries data\n self.renameKey('nSamples', 'psdNSamples', meta=metaTmp, units=unitsTmp)\n self.renameKey('dt', 'psdDt', meta=metaTmp, units=unitsTmp)\n\n # set psd unit\n unitsTmp['psd'] = 'V^2 / Hz'\n\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n if self.data:\n metaTmp, unitsTmp = self.data.getMetadata()\n\n # rename variables for the sake of consistency and compatibility with Matlab and because the naming is\n # confusing: samplingRate is actually the acquisition rate since the DAQ card averages the data already\n # the sampling rate should describe the actual time step between data points not something else\n if 'recordingRate' in metaTmp:\n self.renameKey('samplingRate', 'acquisitionRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('recordingRate', 'samplingRate', meta=metaTmp, units=unitsTmp)\n self.renameKey('nSamples', 'nAcquisitionsPerSample', meta=metaTmp)\n\n # add trial number\n metaTmp['trial'] = self.data.getTrialNumber()\n\n # update dictionaries\n meta.update(metaTmp)\n units.update(unitsTmp)\n\n # add title string to metadata, used for plots\n self.setTitle(meta)\n\n # make sure all axes have the beadDiameter\n meta['pmY']['beadDiameter'] = meta['pmX']['beadDiameter']\n units['pmY']['beadDiameter'] = units['pmX']['beadDiameter']\n meta['aodY']['beadDiameter'] = meta['aodX']['beadDiameter']\n units['aodY']['beadDiameter'] = units['aodX']['beadDiameter']\n\n # add trap names\n meta['traps'] = meta.subDictKeys()\n\n return meta, units",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache",
"def cache(self):\n return self.__cache"
] | [
"0.5946828",
"0.5924196",
"0.5874222",
"0.58641565",
"0.58568573",
"0.5794843",
"0.57618964",
"0.5748162",
"0.5736788",
"0.5663821",
"0.5662813",
"0.5655036",
"0.5647174",
"0.56243145",
"0.55787057",
"0.5567786",
"0.55402327",
"0.5527116",
"0.55239946",
"0.55070686",
"0.54851043",
"0.54838306",
"0.54830235",
"0.54795015",
"0.5472423",
"0.5455209",
"0.5450143",
"0.5450143",
"0.5450143",
"0.5450143"
] | 0.6988418 | 0 |
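The record above is an httpretty body callback that serves cached SAML metadata carrying a cacheDuration attribute. The sketch below, which is not part of any record, shows how a callback of that `(status, headers, body)` shape can be registered and exercised; the URL and XML payload are placeholders, and it assumes the `httpretty` and `requests` packages are available.

```python
import httpretty
import requests

# Placeholder metadata body and URL -- both invented for this sketch.
FAKE_METADATA = '<EntitiesDescriptor cacheDuration="PT4H"/>'
METADATA_URL = "https://idp.example.com/metadata.xml"


def metadata_callback(request, uri, response_headers):
    # Same (status, headers, body) shape as the callback in the record above.
    return 200, response_headers, FAKE_METADATA


httpretty.enable()
try:
    httpretty.register_uri(httpretty.GET, METADATA_URL, body=metadata_callback)
    response = requests.get(METADATA_URL)
    assert response.status_code == 200
    assert "cacheDuration" in response.text
finally:
    httpretty.disable()
    httpretty.reset()
```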
Enable and configure the TestShib SAML IdP as a third_party_auth provider | def _configure_testshib_provider(self, **kwargs):
fetch_metadata = kwargs.pop('fetch_metadata', True)
assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)
kwargs.setdefault('name', self.PROVIDER_NAME)
kwargs.setdefault('enabled', True)
kwargs.setdefault('visible', True)
kwargs.setdefault("backend_name", "tpa-saml")
kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)
kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)
kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)
kwargs.setdefault('icon_class', 'fa-university')
kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName
kwargs.setdefault('max_session_length', None)
kwargs.setdefault('send_to_registration_first', False)
kwargs.setdefault('skip_email_verification', False)
saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member
if fetch_metadata:
assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member
num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()
if assert_metadata_updates:
assert num_total == 1 # lint-amnesty, pylint: disable=no-member
assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member
assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member
assert num_updated == 1 # lint-amnesty, pylint: disable=no-member
assert num_failed == 0 # lint-amnesty, pylint: disable=no-member
assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member
return saml_provider | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def enable_third_party_auth():\r\n\r\n from third_party_auth import settings as auth_settings\r\n auth_settings.apply_settings(settings.THIRD_PARTY_AUTH, settings)",
"def init_saml_auth(saml_prepared_flask_request):\n return OneLogin_Saml2_Auth(saml_prepared_flask_request, custom_base_path=app.config.get('SAML_PATH', None))",
"def init_saml_auth(req):\n auth = OneLogin_Saml2_Auth(req, custom_base_path=app.config[\"SAML_PATH\"])\n return auth",
"def __init__(self, base_url):\n module_base = \"%s/%s\" % (base_url, Saml2BackendPlugin.provider)\n sp_config = {\n \"entityid\": \"%s/proxy_sp.xml\" % module_base,\n \"service\": {\n \"sp\": {\n \"allow_unsolicited\": True,\n \"endpoints\": {\n \"assertion_consumer_service\": [\n (\"%s/acs/post\" % module_base, BINDING_HTTP_POST),\n (\"%s/acs/redirect\" % module_base, BINDING_HTTP_REDIRECT)\n ],\n }\n }\n },\n \"key_file\": TestConfiguration.get_instance().backend_key.name,\n \"cert_file\": TestConfiguration.get_instance().backend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_idp_metadata,\n },\n\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n config = {\"config\": sp_config,\n \"idp_entity_id\": \"https://example.com/unittest_idp.xml\",\n \"state_id\": \"saml_backend_test_id\"\n }\n\n super(Saml2BackendPlugin, self).__init__(SamlBackend, Saml2BackendPlugin.provider, config)",
"def add_tomcat7_idp():\n pass",
"def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0",
"def setup_provider(self):\n pass",
"def test_login(self):\n self._configure_testshib_provider()\n self._test_login()",
"def get_saml_auth(flask_request):\n return OneLogin_Saml2_Auth(prepare_flask_request_for_saml(flask_request), custom_base_path=app.config.get('SAML_PATH', None))",
"def __init__(self, base_url):\n idpconfig = {\n \"entityid\": \"{}/proxy.xml\".format(base_url),\n \"service\": {\n \"idp\": {\n \"endpoints\": {\n \"single_sign_on_service\": [(\"%s/%s/sso/redirect\" %\n (base_url, Saml2BackendPlugin.provider),\n BINDING_HTTP_REDIRECT),\n (\"%s/%s/sso/post\" %\n (base_url, Saml2BackendPlugin.provider),\n BINDING_HTTP_POST)]\n },\n },\n },\n \"key_file\": TestConfiguration.get_instance().frontend_key.name,\n \"cert_file\": TestConfiguration.get_instance().frontend_cert.name,\n \"metadata\": {\n \"local\": TestConfiguration.get_instance().fake_sp_metadata,\n },\n \"xmlsec_binary\": TestConfiguration.get_instance().xmlsec_path,\n }\n\n config = {\"idp_config\": idpconfig,\n \"endpoints\": Saml2FrontendPlugin.endpoints,\n \"base\": base_url,\n \"state_id\": \"saml_frontend_state_id\"}\n\n super(Saml2FrontendPlugin, self).__init__(SamlFrontend, \"Saml2IDP\", config)",
"def setup(cls, transport_config):\n cls.we_are_initiator = transport_config.weAreClient\n\n # Check for shared-secret in the server transport options.\n transport_options = transport_config.getServerTransportOptions()\n if transport_options and \"shared-secret\" in transport_options:\n log.debug(\"Setting shared-secret from server transport options: '%s'\", transport_options[\"shared-secret\"])\n cls.shared_secret = transport_options[\"shared-secret\"]",
"def enable_sso(DirectoryId=None, UserName=None, Password=None):\n pass",
"def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"",
"def test_externalauth_login_required_course_context(self):\r\n TARGET_URL = reverse('courseware', args=[self.course.id.to_deprecated_string()]) # pylint: disable=C0103\r\n noshib_response = self.client.get(TARGET_URL, follow=True)\r\n self.assertEqual(noshib_response.redirect_chain[-1],\r\n ('http://testserver/accounts/login?next={url}'.format(url=TARGET_URL), 302))\r\n self.assertContains(noshib_response, (\"Log into your {platform_name} Account | {platform_name}\"\r\n .format(platform_name=settings.PLATFORM_NAME)))\r\n self.assertEqual(noshib_response.status_code, 200)\r\n\r\n TARGET_URL_SHIB = reverse('courseware', args=[self.shib_course.id.to_deprecated_string()]) # pylint: disable=C0103\r\n shib_response = self.client.get(**{'path': TARGET_URL_SHIB,\r\n 'follow': True,\r\n 'REMOTE_USER': self.extauth.external_id,\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'})\r\n # Test that the shib-login redirect page with ?next= and the desired page are part of the redirect chain\r\n # The 'courseware' page actually causes a redirect itself, so it's not the end of the chain and we\r\n # won't test its contents\r\n self.assertEqual(shib_response.redirect_chain[-3],\r\n ('http://testserver/shib-login/?next={url}'.format(url=TARGET_URL_SHIB), 302))\r\n self.assertEqual(shib_response.redirect_chain[-2],\r\n ('http://testserver{url}'.format(url=TARGET_URL_SHIB), 302))\r\n self.assertEqual(shib_response.status_code, 200)",
"def includeme(config):\n # authentication\n auth_secret = os.environ.get('AUTH_SECRET', '')\n auth_policy = AuthTktAuthenticationPolicy(\n secret=auth_secret,\n hashalg='sha512'\n )\n config.set_authentication_policy(auth_policy)\n # authorization\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n config.set_root_factory(MyRoot)\n\n session_secret = os.environ.get('SESSION_SECRET', '')\n session_factory = SignedCookieSessionFactory(session_secret)\n config.set_session_factory(session_factory)\n config.set_default_csrf_options(require_csrf=True)",
"def includeme(config):\n # Grab the pyramid-wide settings, to look for any auth config.\n settings = config.get_settings().copy()\n # Use the settings to construct an AuthenticationPolicy.\n authn_policy = SRPAuthenticationPolicy.from_settings(settings)\n config.set_authentication_policy(authn_policy)\n # Hook up a default AuthorizationPolicy.\n # You can't have one without the other, and ACLAuthorizationPolicy is\n # usually what you want. If the app configures one explicitly then this\n # will get overridden.\n authz_policy = ACLAuthorizationPolicy()\n config.set_authorization_policy(authz_policy)\n # Add forbidden view to challenge for auth credentials.\n config.add_view(authn_policy.challenge_view,\n context=\"pyramid.exceptions.Forbidden\")",
"def samladsv3(self):\n try:\n # Get the federated credentials from the user\n print(\"[-] Get authentication token\")\n print(\"Email:\", end=' ')\n username = input()\n password = getpass.getpass()\n print('')\n\n # Initiate session handler\n session = requests.Session()\n\n # Programmatically get the SAML assertion\n # Opens the initial IdP url and follows all of the HTTP302 redirects, and\n # gets the resulting login page\n formresponse = session.get(idpentryurl, verify=sslverification)\n # Capture the idpauthformsubmiturl, which is the final url after all the 302s\n idpauthformsubmiturl = formresponse.url\n\n # Parse the response and extract all the necessary values\n # in order to build a dictionary of all of the form values the IdP expects\n formsoup = BeautifulSoup(formresponse.text, \"html.parser\")\n payload = {}\n\n for inputtag in formsoup.find_all(re.compile('(INPUT|input)')):\n name = inputtag.get('name','')\n value = inputtag.get('value','')\n if \"user\" in name.lower():\n #Make an educated guess that this is the right field for the username\n payload[name] = username\n elif \"email\" in name.lower():\n #Some IdPs also label the username field as 'email'\n payload[name] = username\n elif \"pass\" in name.lower():\n #Make an educated guess that this is the right field for the password\n payload[name] = password\n else:\n #Simply populate the parameter with the existing value (picks up hidden fields in the login form)\n payload[name] = value\n\n # Debug the parameter payload if needed\n # Use with caution since this will print sensitive output to the screen\n #print(payload)\n\n # Some IdPs don't explicitly set a form action, but if one is set we should\n # build the idpauthformsubmiturl by combining the scheme and hostname\n # from the entry url with the form action target\n # If the action tag doesn't exist, we just stick with the\n # idpauthformsubmiturl above\n for inputtag in formsoup.find_all(re.compile('(FORM|form)')):\n action = inputtag.get('action')\n loginid = inputtag.get('id')\n if (action and loginid == \"loginForm\"):\n parsedurl = urlparse(idpentryurl)\n idpauthformsubmiturl = parsedurl.scheme + \"://\" + parsedurl.netloc + action\n\n # Performs the submission of the IdP login form with the above post data\n response = session.post(\n idpauthformsubmiturl, data=payload, verify=sslverification)\n\n # Debug the response if needed\n #print(response.text)\n\n # Overwrite and delete the credential variables, just for safety\n username = '##############################################'\n password = '##############################################'\n del username\n del password\n\n # Decode the response and extract the SAML assertion\n soup = BeautifulSoup(response.text, \"html.parser\")\n assertion = ''\n\n # Look for the SAMLResponse attribute of the input tag (determined by\n # analyzing the debug print lines above)\n for inputtag in soup.find_all('input'):\n if(inputtag.get('name') == 'SAMLResponse'):\n #print(inputtag.get('value'))\n assertion = inputtag.get('value')\n\n # Better error handling is required for production use.\n if (assertion == ''):\n #TODO: Insert valid error checking/handling\n print('Response did not contain a valid SAML assertion')\n sys.exit(0)\n\n # Debug only\n #print(base64.b64decode(assertion))\n\n # Parse the returned assertion and extract the authorized roles\n awsroles = []\n root = ET.fromstring(base64.b64decode(assertion))\n for saml2attribute in root.iter('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute'):\n if 
(saml2attribute.get('Name') == 'https://aws.amazon.com/SAML/Attributes/Role'):\n for saml2attributevalue in saml2attribute.iter('{urn:oasis:names:tc:SAML:2.0:assertion}AttributeValue'):\n awsroles.append(saml2attributevalue.text)\n\n # Note the format of the attribute value should be role_arn,principal_arn\n # but lots of blogs list it as principal_arn,role_arn so let's reverse\n # them if needed\n for awsrole in awsroles:\n chunks = awsrole.split(',')\n if'saml-provider' in chunks[0]:\n newawsrole = chunks[1] + ',' + chunks[0]\n index = awsroles.index(awsrole)\n awsroles.insert(index, newawsrole)\n awsroles.remove(awsrole)\n\n # If I have more than one role, ask the user which one they want,\n # otherwise just proceed\n print(\"\")\n if len(awsroles) > 1:\n i = 0\n print(\"Please choose the role you would like to assume:\")\n for awsrole in awsroles:\n print('[', i, ']: ', awsrole.split(',')[0])\n i += 1\n print(\"Selection: \", end=' ')\n selectedroleindex = input()\n\n # Basic sanity check of input\n if int(selectedroleindex) > (len(awsroles) - 1):\n print('You selected an invalid role index, please try again')\n sys.exit(0)\n\n role_arn = awsroles[int(selectedroleindex)].split(',')[0]\n principal_arn = awsroles[int(selectedroleindex)].split(',')[1]\n else:\n role_arn = awsroles[0].split(',')[0]\n principal_arn = awsroles[0].split(',')[1]\n\n # Use the assertion to get an AWS STS token using Assume Role with SAML\n conn = boto3.client('sts', region_name=region)\n token = conn.assume_role_with_saml(RoleArn=role_arn, PrincipalArn=principal_arn, SAMLAssertion=assertion)\n\n # Read in the existing config file\n config = configparser.RawConfigParser()\n config.read(credentials)\n\n # Put the credentials into a saml specific section instead of clobbering\n # the default credentials\n if not config.has_section('saml'):\n config.add_section('saml')\n\n config['saml']['output'] = outputformat\n config['saml']['region'] = region\n config['saml']['aws_access_key_id'] = token['Credentials']['AccessKeyId']\n config['saml']['aws_secret_access_key'] = token['Credentials']['SecretAccessKey']\n config['saml']['aws_session_token'] = token['Credentials']['SessionToken']\n\n # Write the updated config file\n with open(credentials, 'w+') as configfile:\n config.write(configfile)\n\n # Give the user some basic info as to what has just happened\n print('\\n\\n----------------------------------------------------------------')\n print('Your new access key pair has been stored in the AWS configuration file {0} under the saml profile.'.format(credentials))\n print('Note that it will expire at {0}.'.format(token['Credentials']['Expiration'].astimezone(get_localzone())))\n print('After this time, you may safely rerun this script to refresh your access key pair.')\n print('To use this credential, call the AWS CLI with the --profile option (e.g. aws --profile saml ec2 describe-instances).')\n print('----------------------------------------------------------------\\n\\n')\n\n return samladsv3\n\n except Exception as e:\n print(\"Error while getting authentication token. %s\" % e)",
"def setup_auth_turing(cluster):\n # Read in auth info\n azure_file = os.path.join(ABSOLUTE_HERE, \"secrets\", \"turing-auth-key-prod.json\")\n with open(azure_file, \"r\") as stream:\n azure = json.load(stream)\n\n # Login in to Azure\n login_cmd = [\n \"az\", \"login\", \"--service-principal\",\n \"--username\", azure[\"sp-app-id\"],\n \"--password\", azure[\"sp-app-key\"],\n \"--tenant\", azure[\"tenant-id\"]\n ]\n subprocess.check_output(login_cmd)\n\n # Set kubeconfig\n creds_cmd = [\n \"az\", \"aks\", \"get-credentials\",\n \"--name\", cluster,\n \"--resource-group\", \"binder-prod\"\n\n ]\n stdout = subprocess.check_output(creds_cmd)\n print(stdout.decode('utf-8'))",
"def test_open_id_setup(self):\r\n self.attempt_login(200)",
"def request_app_setup(hass, config, add_devices, discovery_info=None):\n from requests.compat import urljoin\n from requests_oauthlib import OAuth2Session\n configurator = hass.components.configurator\n authorization_base_url = urljoin(BASE_URL, '/oauth/authorize')\n oauth = OAuth2Session(config[CONF_CLIENT_ID], redirect_uri=REDIRECT_URI, state=None)\n\n def trakt_configuration_callback(data):\n \"\"\"Run when the configuration callback is called.\"\"\"\n token_url = urljoin(BASE_URL, '/oauth/token')\n oauth.fetch_token(token_url, client_secret=config[CONF_CLIENT_SECRET], code=data.get('pin_code'))\n token = oauth.token['access_token']\n save_token(hass, token)\n continue_setup_platform(hass, config, token, add_devices, discovery_info)\n\n if 'trakt' not in _CONFIGURING:\n authorization_url, _ = oauth.authorization_url(authorization_base_url, username=config[CONF_USERNAME])\n\n _CONFIGURING['trakt'] = configurator.request_config(\n 'Trakt',\n trakt_configuration_callback,\n description=\"Enter pin code from Trakt: \" + authorization_url,\n submit_caption='Verify',\n fields=[{\n 'id': 'pin_code',\n 'name': \"Pin code\",\n 'type': 'string'}]\n )",
"def test_auth0_config_anon(anontestapp, registry):\n _test_auth_config(anontestapp, registry)",
"def setUp(self):\n super().setUp()\n\n # Mock the call to the SAP SuccessFactors assertion endpoint\n SAPSF_ASSERTION_URL = 'http://successfactors.com/oauth/idp'\n\n def assertion_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'private_key=fake_private_key_here' in _request.body\n assert b'user_id=myself' in _request.body\n assert b'token_url=http%3A%2F%2Fsuccessfactors.com%2Foauth%2Ftoken' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, 'fake_saml_assertion')\n\n httpretty.register_uri(httpretty.POST, SAPSF_ASSERTION_URL, content_type='text/plain', body=assertion_callback)\n\n SAPSF_BAD_ASSERTION_URL = 'http://successfactors.com/oauth-fake/idp'\n\n def bad_callback(_request, _uri, headers):\n \"\"\"\n Return a 404 error when someone tries to call the URL.\n \"\"\"\n return (404, headers, 'NOT AN ASSERTION')\n\n httpretty.register_uri(httpretty.POST, SAPSF_BAD_ASSERTION_URL, content_type='text/plain', body=bad_callback)\n\n # Mock the call to the SAP SuccessFactors token endpoint\n SAPSF_TOKEN_URL = 'http://successfactors.com/oauth/token'\n\n def token_callback(_request, _uri, headers):\n \"\"\"\n Return a fake assertion after checking that the input is what we expect.\n \"\"\"\n assert b'assertion=fake_saml_assertion' in _request.body\n assert b'company_id=NCC1701D' in _request.body\n assert b'grant_type=urn%3Aietf%3Aparams%3Aoauth%3Agrant-type%3Asaml2-bearer' in _request.body\n assert b'client_id=TatVotSEiCMteSNWtSOnLanCtBGwNhGB' in _request.body\n return (200, headers, '{\"access_token\": \"faketoken\"}')\n\n httpretty.register_uri(httpretty.POST, SAPSF_TOKEN_URL, content_type='application/json', body=token_callback)\n\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'email': 'john@smith.com',\n 'country': 'Australia',\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)",
"def test_register(self):\n self._configure_testshib_provider()\n self._test_register()",
"def provider(hass):\n provider = hass.loop.run_until_complete(\n register_auth_provider(hass, {\"type\": \"homeassistant\"})\n )\n hass.loop.run_until_complete(provider.async_initialize())\n return provider",
"def on_identity_loaded(sender, identity):\n key = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_SESSION_KEY\",\n OAUTHCLIENT_CERN_OPENID_SESSION_KEY,\n )\n identity.provides.update(session.get(key, []))",
"def post_setup(cls):\n super().post_setup()\n\n # The SENTRY_DSN setting should be available to activate sentry for an environment\n if cls.SENTRY_DSN is not None:\n sentry_sdk.init( # pylint: disable=abstract-class-instantiated\n dsn=cls.SENTRY_DSN,\n environment=cls._get_environment(),\n release=get_release(),\n integrations=[DjangoIntegration()],\n )\n with sentry_sdk.configure_scope() as scope:\n scope.set_extra(\"application\", \"backend\")",
"def test_client_key_secret(self):\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['lti_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n self.xmodule.lti_id = \"lti_id\"\r\n key, secret = self.xmodule.get_client_key_secret()\r\n expected = ('test_client', 'test_secret')\r\n self.assertEqual(expected, (key, secret))",
"def test_shib_login_enrollment(self):\r\n student = UserFactory.create()\r\n extauth = ExternalAuthMap(external_id='testuser@stanford.edu',\r\n external_email='',\r\n external_domain='shib:https://idp.stanford.edu/',\r\n external_credentials=\"\",\r\n internal_password=\"password\",\r\n user=student)\r\n student.set_password(\"password\")\r\n student.save()\r\n extauth.save()\r\n\r\n course = CourseFactory.create(org='Stanford', number='123', display_name='Shib Only')\r\n course.enrollment_domain = 'shib:https://idp.stanford.edu/'\r\n self.store.update_item(course, '**replace_user**')\r\n\r\n # use django test client for sessions and url processing\r\n # no enrollment before trying\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n self.client.logout()\r\n request_kwargs = {'path': '/shib-login/',\r\n 'data': {'enrollment_action': 'enroll', 'course_id': course.id.to_deprecated_string(), 'next': '/testredirect'},\r\n 'follow': False,\r\n 'REMOTE_USER': 'testuser@stanford.edu',\r\n 'Shib-Identity-Provider': 'https://idp.stanford.edu/'}\r\n response = self.client.get(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n # Clean up and try again with POST (doesn't happen with real production shib, doing this for test coverage)\r\n self.client.logout()\r\n CourseEnrollment.unenroll(student, course.id)\r\n self.assertFalse(CourseEnrollment.is_enrolled(student, course.id))\r\n\r\n response = self.client.post(**request_kwargs)\r\n # successful login is a redirect to \"/\"\r\n self.assertEqual(response.status_code, 302)\r\n self.assertEqual(response['location'], 'http://testserver/testredirect')\r\n # now there is enrollment\r\n self.assertTrue(CourseEnrollment.is_enrolled(student, course.id))",
"def setup_course_with_proctoring_backend(self, proctoring_provider, escalation_email):\n course = CourseFactory.create(enable_proctored_exams=True,\n enable_timed_exams=True,\n proctoring_provider=proctoring_provider,\n proctoring_escalation_email=escalation_email)\n self.setup_course_url(course)",
"def test_client_key_secret(self):\n #this adds lti passports to system\n mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = \"lti_id\"\n key, secret = self.xmodule.get_client_key_secret()\n expected = ('test_client', 'test_secret')\n assert expected == (key, secret)"
] | [
"0.67878383",
"0.63547695",
"0.63465583",
"0.61199355",
"0.59225214",
"0.58825934",
"0.56425726",
"0.563755",
"0.562901",
"0.5535802",
"0.5476775",
"0.53583604",
"0.531819",
"0.5295527",
"0.52504843",
"0.51937705",
"0.5125788",
"0.50825155",
"0.5081821",
"0.5010863",
"0.49874368",
"0.497723",
"0.49631655",
"0.495106",
"0.4943688",
"0.49377185",
"0.49259707",
"0.49202064",
"0.48945984",
"0.48763403"
] | 0.7070957 | 0 |
Configure TestShib before running the login test | def test_login(self):
self._configure_testshib_provider()
self._test_login() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register(self):\n self._configure_testshib_provider()\n self._test_register()",
"def set_up_login():\n\n bitool.app.testing = True\n bitool.app.config['TESTING'] = True\n bitool.app.login_manager.init_app(bitool.app)\n app = bitool.app.test_client()\n\n return app",
"def configure_test(self, test, config_json):\n pass",
"def test_activate_login(self):\r\n pass",
"def setUp(self):\n self.user = {\n \"Email\": \"user@example.com\",\n \"Password\": \"pass1234\",\n \"Confirm Password\": \"pass1234\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'",
"def setUp(self):\n self.app = app.test_client()\n self.new_user_login = {\n 'username': 'daniel',\n 'password': 'kamarster@gmail.com'\n }\n self.new_user_info = {\n 'username': 'daniel',\n 'fullname': 'daniel jambo',\n 'email': 'daniel.kamar@gmail.com',\n 'password': 'kamarster@gmail.com'\n }",
"def setUpConfig(self):\n pass",
"def setUp(self):\n\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1",
"def setUpClass(cls):\n app.config['TESTING'] = True\n app.config['CSRF_ENABLED'] = False\n app.config['WTF_CSRF_ENABLED'] = False\n app.config['SQLALCHEMY_DATABASE_URI'] = \\\n 'sqlite:///' + TEST_DATABASE_PATH\n\n # Disable login_required for tests\n # Use self.enable_login() context manager to enable for a test\n app.login_manager._login_disabled = True\n\n # Disable session protection, since `follow_redirects=True` doesn't\n # seem to maintain request metadata (e.g. when using 'REMOTE_ADDR')\n # (Is there a better way?)\n app.login_manager.session_protection = None",
"def setup_class(self):\n self.endpoint = VERSION_PREFIX + '/auth/login'\n self.test_client = create_app().test_client()",
"def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs",
"def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()",
"def setUp(self):\n\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"ABC\"\n self.client = app.test_client()\n\n # Connect to test database\n connect_to_db(app)\n db.drop_all()\n db.create_all()\n load_test()\n\n # Put user1 into session.\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"current_user\"] = 1",
"def test_set_session():",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self): \n self.client = app.test_client()\n self.acceso = login(self.client)\n identity_loaded.connect(_on_principal_init)",
"def setUp(self):\n #app['TESTING'] = True\n self.test_app = app.test_client()",
"def setUp(self):\n super(LoginTest, self).setUp()\n self.login_url = \"http://localhost:5000/login\"\n self.logout_url = \"http://localhost:5000/logout\"\n self.valid_health_card_nb = \"XGCB 1090 0810\"\n self.password = \"password\"\n self.send_post(self.logout_url)\n cache.reset_cache()",
"def setUp(self):\n super(TestControlsImport, self).setUp()\n self.client.get(\"/login\")",
"def setUpClass(cls, user=''):\n super().setUpClass(first_admin)",
"def setUp(self):\n self.app = create_app(config_name=\"testing\")\n self.app_context = self.app.app_context()\n self.app_context.push()\n self.client = self.app.test_client()\n\n\n self.user = {\n\t \"firstname\": \"Michael\",\n\t \"lastname\": \"Mbugua\",\n \"othername\": \"Mike\",\n \"email\": \"mike@gmail.com\",\n \"phoneNumber\": \"0708453901\",\n \"username\": \"Thomas\",\n \"password\": \"Aw3someSauce\"\n \n }",
"def setUp(self):\n\n app.config['TESTING'] = True\n self.client = app.test_client()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1",
"def setUp(self):\n\n from . import main\n\n from .models import (\n get_engine,\n get_session_factory,\n get_tm_session,\n )\n\n self.config={\n 'admin_password':self.admin_login['password'],\n 'sqlalchemy.url':'sqlite://',\n 'auth.secret':'secret'\n }\n\n self.app = main({}, **self.config)\n self.init_database()\n self.testapp=webtest.TestApp(self.app)",
"def test_login_required():\n pass",
"def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n app.config['SECRET_KEY'] = 'testingKey'\r\n self.client = app.test_client()\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess['email'] = \"jeffrey62@alexander.net\""
] | [
"0.68922627",
"0.6773958",
"0.6753465",
"0.6616434",
"0.65026134",
"0.6428037",
"0.6423407",
"0.6388931",
"0.63668215",
"0.6346543",
"0.63392085",
"0.6336799",
"0.62982786",
"0.62645006",
"0.62591666",
"0.62502235",
"0.62502235",
"0.62462556",
"0.62462556",
"0.62462556",
"0.62439245",
"0.6243138",
"0.6242279",
"0.62198436",
"0.62160385",
"0.6207759",
"0.620737",
"0.61967075",
"0.6193627",
"0.6192629"
] | 0.81601787 | 0 |
Configure TestShib before running the register test | def test_register(self):
self._configure_testshib_provider()
self._test_register() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure_test(self, test, config_json):\n pass",
"def test_register():\n plug.manager.register(junit4)",
"def setUpConfig(self):\n pass",
"def test_login(self):\n self._configure_testshib_provider()\n self._test_login()",
"def setup_method(self, test_method):\n self.wo_obj = TestCases()\n self.global_config, self.test_args = self.wo_obj.get_config_data(test_method=test_method.__name__)",
"def startTestHook(self):",
"def setUp(self):\n self.modules = {}",
"def runTest(self):\n self.setUp()\n self.test_ExtendSpine1()",
"def test_scrapping(self):\n self.assertEqual(ScrappingConfig.name, \"scrapping\")",
"def config_setup(self, config):\n super(PushGatewayApiV1TestCase, self).config_setup(config)\n config[\"apps\"][\"com.example.spqr\"] = {\n \"type\": \"tests.test_pushgateway_api_v1.TestPushkin\"\n }",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def configure(self):",
"def testInit(self):\n self.globalInit()\n self.test.start()",
"def test_install(self):\n pass",
"def setUp(self):\n event_bus._event_bus = event_bus._EventBus()",
"def _set_up():\n repl._setUp = self.setUp",
"def setUpModule():\n base.enabledPlugins.append('jobs')\n base.enabledPlugins.append('romanesco')\n base.enabledPlugins.append('gravatar')\n base.enabledPlugins.append('minerva')\n base.startServer(False)",
"def setUp(self):\n #app['TESTING'] = True\n self.test_app = app.test_client()",
"def setUp_extra(self):\n pass",
"def setUp(self):\n self.supvisors = DummySupvisors()",
"def setUpClass(cls):\n super().setUpClass(application_name='ovn-chassis')",
"def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True",
"def configure(self) -> None:"
] | [
"0.7068733",
"0.6525172",
"0.6467879",
"0.63710594",
"0.6313307",
"0.6234248",
"0.6212751",
"0.60972005",
"0.6043006",
"0.6039801",
"0.5995316",
"0.5995316",
"0.5995316",
"0.5995316",
"0.5992337",
"0.5989009",
"0.59598655",
"0.5953964",
"0.5928903",
"0.59063",
"0.5904783",
"0.5883329",
"0.5874261",
"0.5873353",
"0.5849461",
"0.5849461",
"0.5849461",
"0.5846878",
"0.5846878",
"0.5832195"
] | 0.83610606 | 0 |
Test that attributes sent by a SAML provider are stored in the UserSocialAuth table. | def test_login_records_attributes(self):
self.test_login()
record = UserSocialAuth.objects.get(
user=self.user, provider=self.PROVIDER_BACKEND, uid__startswith=self.PROVIDER_IDP_SLUG
)
attributes = record.extra_data
assert attributes.get('urn:oid:1.3.6.1.4.1.5923.1.1.1.9') == ['Member@testshib.org', 'Staff@testshib.org']
assert attributes.get('urn:oid:2.5.4.3') == ['Me Myself And I']
assert attributes.get('urn:oid:0.9.2342.19200300.100.1.1') == ['myself']
        assert attributes.get('urn:oid:2.5.4.20') == ['555-5555']  # Phone number | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"myself@testshib.org\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()",
"def test_attribute_authenticated_has_attributes(testapp, login_fixture, fill_the_db):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert len(response.html.find_all(\"img\")) == 2",
"def test_attributes(self):\n user = User()\n self.assertTrue(hasattr(user, \"email\"))\n self.assertTrue(hasattr(user, \"password\"))\n self.assertTrue(hasattr(user, \"first_name\"))\n self.assertTrue(hasattr(user, \"last_name\"))",
"def test_attributes(self):\n self.assertTrue(hasattr(User()), \"email\")\n self.assertTrue(hasattr(User()), \"password\")\n self.assertTrue(hasattr(User()), \"first_name\")\n self.assertTrue(hasattr(User()), \"last_name\")\n self.assertTrue(hasattr(User()), \"__init__\")",
"def _test_assessment_users(self, asmt, users):\n verification_errors = \"\"\n for user_name, expected_types in users.items():\n try:\n user = models.Person.query.filter_by(name=user_name).first()\n rel = models.Relationship.find_related(asmt, user)\n if expected_types:\n self.assertNotEqual(\n rel, None,\n \"User {} is not mapped to {}\".format(user.email, asmt.slug))\n self.assertIn(\"AssigneeType\", rel.relationship_attrs)\n self.assertEqual(\n set(rel.relationship_attrs[\n \"AssigneeType\"].attr_value.split(\",\")),\n expected_types\n )\n else:\n self.assertEqual(\n rel, None,\n \"User {} is mapped to {}\".format(user.email, asmt.slug))\n except AssertionError as error:\n verification_errors += \"\\n\\nChecks for Users-Assessment mapping \"\\\n \"failed for user '{}' with:\\n{}\".format(user_name, str(error))\n\n self.assertEqual(verification_errors, \"\", verification_errors)",
"def test_attributes(self):\n u = User.query.filter_by(username=\"jjones\").first()\n assert u.username == \"jjones\"\n assert u.email == \"jjones@yahoo.com\"\n assert len(u.reviews) == 4\n assert u.email_verified is False\n assert u._email_token_key == 'verify_email'\n assert u._password_token_key == 'reset_password'\n assert u.sentfriendrequests == []\n assert u.receivedfriendrequests == []\n assert u.sentgrouprequests == []\n u2 = User.query.get(1)\n assert u2 in u.friends\n assert type(u.address) == Address",
"def test_activity_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"activity\"))\n if models.storage_t == 'db':\n self.assertEqual(student.activity, None)\n else:\n self.assertEqual(student.activity, \"\")",
"def assert_social_auth_exists_for_user(self, user, strategy):\r\n social_auths = strategy.storage.user.get_social_auth_for_user(\r\n user, provider=self.PROVIDER_CLASS.BACKEND_CLASS.name)\r\n self.assertEqual(1, len(social_auths))\r\n self.assertEqual(self.backend_name, social_auths[0].provider)",
"def test_first_name_attr(self):\n user = User()\n user_details = {\"student_id\": user.id, \"first_name\": \"Joe\"}\n student = Student(**user_details)\n self.assertTrue(hasattr(student, \"first_name\"))\n if models.storage_t == 'db':\n self.assertEqual(student.first_name, \"Joe\")\n else:\n self.assertEqual(student.first_name, \"Joe\")",
"def test_attribute_view_authenticated(testapp, fill_the_db, login_fixture):\n response = testapp.get('/attribute/1/1', params=login_fixture)\n assert response.status_code == 200",
"def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider",
"def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0",
"def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_claims_supported_set(self):\n expected_claims = ['openid', 'email']\n\n request = self.factory.get(self.url)\n\n response = ProviderInfoView.as_view()(request)\n dic = json.loads(response.content.decode('utf-8'))\n self.assertEqual(dic['claims_supported'], expected_claims)",
"def test_add_authenticated_session_var(self):\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n session['foo'] = 'bar'\r\n session.save()\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='john'\"\r\n \"AND name='foo'\") \r\n self.assertEqual('bar', cursor.fetchone()[0])",
"def test_read_user_identity_mapping(self):\n pass",
"def testMetadata(self):\n self.assertGreater(len(self.unauth.metadata(self.dataset)), 0)\n self.assertGreater(len(self.auth.metadata(self.dataset)), 0)",
"def test_all_user_active(self):\n procedure = Procedure.objects.first()\n students = Student.objects.filter(promotion=procedure.promotion)\n for student in students:\n self.assertEqual(student.user.is_active, True)",
"def test_user_attrs(self):\n # These are 'functional' level tests for common use cases. Direct\n # testing of the implementation (SimpleLazyObject) is in the 'utils'\n # tests.\n self.client.login(username=\"super\", password=\"secret\")\n user = authenticate(username=\"super\", password=\"secret\")\n response = self.client.get(\"/auth_processor_user/\")\n self.assertContains(response, \"unicode: super\")\n self.assertContains(response, \"id: %d\" % self.superuser.pk)\n self.assertContains(response, \"username: super\")\n # bug #12037 is tested by the {% url %} in the template:\n self.assertContains(response, \"url: /userpage/super/\")\n\n # A Q() comparing a user and with another Q() (in an AND or OR fashion).\n Q(user=response.context[\"user\"]) & Q(someflag=True)\n\n # Tests for user equality. This is hard because User defines\n # equality in a non-duck-typing way\n # See bug #12060\n self.assertEqual(response.context[\"user\"], user)\n self.assertEqual(user, response.context[\"user\"])",
"def test_get_attributes(test_common_dao):\n _session = test_common_dao.RAMSTK_SESSION(\n bind=test_common_dao.engine, autoflush=False, expire_on_commit=False)\n DUT = _session.query(RAMSTKSiteInfo).first()\n\n assert DUT.get_attributes() == ATTRIBUTES",
"def test_dataset_for_personal_accounts(self):\n pass",
"def test_user_login(self):\n\n for i in range(0, len(self.users)):\n\n # Gets user\n user = self.users[i]\n\n # Creates payload\n event = {\n \"username\": user['username'],\n \"pwd\": user['pwd']\n }\n\n # Invokes\n response = handler.user_login(event=event, context=None)\n\n # Validates response\n body_dict = json.loads(response['body'])\n apidataset_dict = body_dict['apidataset']\n self.assertEqual(response['statusCode'], 200)\n self.assertEqual (\n apidataset_dict['displayName'],\n user['nameFirst'] + ' ' + user['nameLast']\n )\n self.assertIn('sessionToken', apidataset_dict)",
"def test_create_user_identity_mapping(self):\n pass",
"def test_attribute_types(self):\n self.assertIsInstance(self.user_1.email, str)\n self.assertIsInstance(self.user_1.password, str)\n self.assertIsInstance(self.user_1.first_name, str)\n self.assertIsInstance(self.user_1.last_name, str)",
"def test_attributeCopied(self):\n self.assertIdentical(\n self.store.findUnique(AMPConfiguration).loginSystem,\n self.store.findUnique(LoginSystem))",
"def test_profile_associated_with_users(self):\n profile = ImagerProfile.objects.first()\n self.assertTrue(hasattr(profile, 'user'))\n self.assertIsInstance(profile.user, User)",
"def test_is_student_user(self):\n student = User.objects.get(email='teststudentuser@test.com')\n self.assertEqual(student.is_staff, False)",
"def acs(r):\n saml_client = _get_saml_client(get_current_domain(r))\n resp = r.POST.get('SAMLResponse', None)\n next_url = r.session.get('login_next_url')\n\n authn_response = saml_client.parse_authn_request_response(\n resp, entity.BINDING_HTTP_POST)\n if authn_response is None:\n return HttpResponse(\"Error at line 115\")\n\n user_identity = authn_response.get_identity()\n if user_identity is None:\n return HttpResponse(\"Error at line 118\")\n\n\n user_email = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('email', 'Email')\n ][0]\n user_name = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('username', 'UserName')\n ][0]\n user_first_name = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('first_name', 'FirstName')\n ][0]\n user_last_name = user_identity[\n settings.SAML2_AUTH\n .get('ATTRIBUTES_MAP', {})\n .get('last_name', 'LastName')\n ][0]\n\n target_user = None\n is_new_user = False\n\n try:\n target_user = User.objects.get(username=user_name)\n if settings.SAML2_AUTH.get('TRIGGER', {}).get('BEFORE_LOGIN', None):\n import_string(\n settings.SAML2_AUTH['TRIGGER']['BEFORE_LOGIN']\n )(user_identity)\n except User.DoesNotExist:\n target_user = _create_new_user(\n user_name, user_email,\n user_first_name, user_last_name\n )\n if settings.SAML2_AUTH.get('TRIGGER', {}).get('CREATE_USER', None):\n import_string(\n settings.SAML2_AUTH['TRIGGER']['CREATE_USER']\n )(user_identity)\n is_new_user = True\n\n r.session.flush()\n\n if target_user.is_active:\n target_user.backend = 'django.contrib.auth.backends.ModelBackend'\n login(r, target_user)\n else:\n return HttpResponse(\"Error at line 169\")\n\n if is_new_user:\n try:\n return render(\n r, 'django_saml2_auth/welcome.html',\n {'user': r.user}\n )\n except TemplateDoesNotExist:\n return HttpResponseRedirect(next_url)\n else:\n return HttpResponseRedirect(next_url)",
"def test_user_information_request(self):\n pass",
"def test_creation_profile_2():\n assert tuple_NT[0][1] == LIST_dict[0]['sex'], \"sex of profile is not getting stored properly\""
] | [
"0.6245322",
"0.59301543",
"0.5911321",
"0.58538973",
"0.58124703",
"0.57879347",
"0.56024635",
"0.5550127",
"0.55485487",
"0.54875433",
"0.54671925",
"0.54621214",
"0.54423463",
"0.5395432",
"0.5389309",
"0.5383121",
"0.5311547",
"0.5295665",
"0.5293737",
"0.52753067",
"0.5272109",
"0.5255254",
"0.5239738",
"0.5237269",
"0.5233854",
"0.522482",
"0.5209603",
"0.52070737",
"0.5201741",
"0.51940554"
] | 0.7325418 | 0 |
Test that SAML login requests and responses are logged when debug mode is enabled and not logged when it is disabled | def test_debug_mode_login(self, debug_mode_enabled):
self._configure_testshib_provider(debug_mode=debug_mode_enabled)
with patch.object(saml_log, 'info') as mock_log:
self._test_login()
if debug_mode_enabled:
# We expect that test_login() does two full logins, and each attempt generates two
# logs - one for the request and one for the response
assert mock_log.call_count == 4
expected_next_url = "/dashboard"
(msg, action_type, idp_name, request_data, next_url, xml), _kwargs = mock_log.call_args_list[0]
assert msg.startswith('SAML login %s')
assert action_type == 'request'
assert idp_name == self.PROVIDER_IDP_SLUG
self.assertDictContainsSubset(
{"idp": idp_name, "auth_entry": "login", "next": expected_next_url},
request_data
)
assert next_url == expected_next_url
assert '<samlp:AuthnRequest' in xml
(msg, action_type, idp_name, response_data, next_url, xml), _kwargs = mock_log.call_args_list[1]
assert msg.startswith('SAML login %s')
assert action_type == 'response'
assert idp_name == self.PROVIDER_IDP_SLUG
self.assertDictContainsSubset({"RelayState": idp_name}, response_data)
assert 'SAMLResponse' in response_data
assert next_url == expected_next_url
assert '<saml2p:Response' in xml
else:
assert not mock_log.called | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_logging_running(self):\n tester = app.test_client(self)\n response = tester.get('/login', content_type='html/text')\n self.assertTrue(b'PLEASE LOGIN' in response.data)",
"def test_logging(self):\n self._verify_logging()",
"def test_successful_login(self):\n pass",
"def test_login_required():\n pass",
"def test_logPage(self):\n\n # Create a test client\n client = server.app.test_client()\n\n # Use the test client to make requests\n result = client.post('/login', follow_redirects=True, data={'username':'sadaqiq@gmail.com',\n 'password':'hadis'})\n\n print(\"it is printing \")\n\n # Compare result.data with assert method\n self.assertIn(b'New Event', result.data)",
"def test_setup_logging_debug(self) -> None:\n # set the log level high to ensure they are properly being change by setup_logging\n self.f_logger.setLevel(LogLevels.CRITICAL)\n self.boto3_logger.setLevel(LogLevels.CRITICAL)\n self.botocore_logger.setLevel(LogLevels.CRITICAL)\n\n with self.assertLogs(self.f_logger, LogLevels.DEBUG) as setup_ctx:\n setup_logging(LogLevels.DEBUG)\n\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.CRITICAL))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.ERROR))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.SUCCESS))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.WARNING))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.NOTICE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.INFO))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.VERBOSE))\n self.assertTrue(self.f_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertTrue(self.boto3_logger.isEnabledFor(LogLevels.DEBUG))\n self.assertTrue(self.botocore_logger.isEnabledFor(LogLevels.DEBUG))\n\n self.assertEqual(setup_ctx.output,\n [f'DEBUG:f-cli:Initalized logging for f-cli version {__version__}'])",
"def test_login(self):\n self._configure_testshib_provider()\n self._test_login()",
"def test_activate_login(self):\r\n pass",
"def test_login_page(self):\n r = requests.get(self.url)\n self.assertEqual(r.status_code, 200)\n soup = BeautifulSoup(r.content)\n self.assertEqual(soup.findAll('legend')[0].contents[0], 'Sign In')",
"def test_user_login(self):\n self.client.login(username=self.username, password=self.password)\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)",
"def test_login_endpoint_not_verbose():\n url = f\"{DEXCOM_BASE_URL}/{DEXCOM_LOGIN_ENDPOINT}\"\n json = {\n \"accountName\": USERNAME,\n \"password\": \"a\",\n \"applicationId\": DEXCOM_APPLICATION_ID,\n }\n r = requests.request(\"post\", url, json=json,)\n assert r.json() == DEFAULT_SESSION_ID",
"def assertDebugOnly(self): # FIXME: when at python 3.10+ replace with assertNoLogs\n with self.assertLogs(\"qiskit.quantum_info.synthesis\", \"DEBUG\") as ctx:\n yield\n for i in range(len(ctx.records)):\n self.assertLessEqual(\n ctx.records[i].levelno,\n logging.DEBUG,\n msg=f\"Unexpected logging entry: {ctx.output[i]}\",\n )\n self.assertIn(\"Requested fidelity:\", ctx.records[i].getMessage())",
"def test_professor_can_login_to_web_portal(professor):",
"def test_aio_can_login_to_web_portal(aio):",
"def test_login_session_check(self):\r\n\t\tprint(\"\")\r\n\t\tprint(\"`login_session_check` method tests\")\r\n\t\tprint(\"---------------------\")\r\n\t\tprint(\"Test: `login_session_check: logged in`\")\r\n\t\tpath = 'login'\r\n\t\twith requests_mock.mock() as m:\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\r\n\t\t\t\t\t\t[\r\n\t\t\t\t\t\t\t{\"FORCE_PWD_CHANGE\":true,\r\n\t\t\t\t\t\t\t\"LAST_ACCT\":1,\r\n\t\t\t\t\t\t\t\"NEXT_PWNED\":null,\r\n\t\t\t\t\t\t\t\"PWD_EXPIRE\":\"2020-07-30\",\r\n\t\t\t\t\t\t\t\"ROOT\":true,\r\n\t\t\t\t\t\t\t\"USER\":\"restuser\",\r\n\t\t\t\t\t\t\t\"USER_ID\":2,\r\n\t\t\t\t\t\t\t\"expired_pwd\":false\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t],\r\n\t\t\t\t\t\"success\":true\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == True\r\n\t\t\tassert session_check[1]['FORCE_PWD_CHANGE'] == True\r\n\t\t\tassert session_check[1]['LAST_ACCT'] == 1\r\n\t\t\tassert session_check[1]['NEXT_PWNED'] == None\r\n\t\t\tassert session_check[1]['ROOT'] == True\r\n\t\t\tassert session_check[1]['USER_ID'] == 2\r\n\t\t\tassert session_check[1]['USER'] == 'restuser'\r\n\t\t\tassert session_check[1]['expired_pwd'] == False\r\n\t\t\tprint(\"Passed!!!\")\r\n\t\t\tprint(\"Test: `login_session_check: not logged in`\")\r\n\t\t\tm.get(\r\n\t\t\t\tf'{host}/{basepath}/{path}',\r\n\t\t\t\tstatus_code = 200,\r\n\t\t\t\treason = 'OK',\r\n\t\t\t\ttext=\"\"\"{\r\n\t\t\t\t\t\"rows\":\t[],\r\n\t\t\t\t\t\"success\":false\r\n\t\t\t\t}\"\"\"\r\n\t\t\t)\r\n\t\t\tsession_check = sdk.login_session_check()\r\n\t\t\tassert session_check[0] == False\r\n\t\t\tassert not session_check[1] # dictionary should be empty\r\n\t\tprint(\"Passed!!!\")",
"def test_auth_xml(self):\n\n config = get_config()\n\n if config.getboolean('auth_test', 'enabled'):\n\n # Run only if enabled\n\n try:\n\n timestamp = config.getint('auth_test', 'timestamp')\n\n except ValueError:\n\n # If timestamp is set to a none-integer, we'll just assume\n # that it's unset\n\n timestamp = None\n\n response = authenticate(\n config.get('auth_test', 'url'),\n config.get('auth_test', 'account'),\n config.get('auth_test', 'preauthkey'),\n config.get('auth_test', 'account_by'),\n config.getint('auth_test', 'expires'),\n timestamp\n )\n\n self.assertNotEqual(\n response,\n None,\n \"Authentication with the configured settings \"\n \"was not successful\"\n )",
"def test_admin_can_login_to_web_portal(admin):",
"def testGetLogAuth(self):\n response = self._get('inventory/log/')\n self.assertEquals(response.status_code, 401)\n\n response = self._get('inventory/log/', username=\"testuser\",\n password=\"password\")\n self.assertEquals(response.status_code, 200)",
"def debugging_tests():\n logging.warning(\"Running debugging tests...\")\n pass",
"def test_level_debug(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(loglevel=logging.DEBUG)), \":detective: **test**\")",
"def test_login(self):\n print(\"Test Login\")\n self.mock_api.return_value = LOGIN_RESPONSE\n self.manager.enabled = False\n assert self.manager.login()\n all_kwargs = parse_args(self.mock_api)\n assert assert_test(self.manager.login, all_kwargs, None,\n self.write_api, self.overwrite)",
"def test_show_login_page(self):\n with self.client as c:\n\n res = c.get(\"/login\")\n html = res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn(\"Don't have an account?\", html)",
"def test_login_failure(self):\n self.client.login(username=self.username, password='AWrongPassword')\n # 2: creation and login\n self.assertTrue(AuditTrail.objects.count() >= 2)\n self.assertEqual(\n AuditTrail.objects.last().level, AuditTrail.LEVEL_INFO)",
"def test_valid_login(self):\n self.assertTrue(self.session.authenticate('test@test.com', 'supersecret'))",
"def test_user_login(self):\n\n for i in range(0, len(self.users)):\n\n # Gets user\n user = self.users[i]\n\n # Creates payload\n event = {\n \"username\": user['username'],\n \"pwd\": user['pwd']\n }\n\n # Invokes\n response = handler.user_login(event=event, context=None)\n\n # Validates response\n body_dict = json.loads(response['body'])\n apidataset_dict = body_dict['apidataset']\n self.assertEqual(response['statusCode'], 200)\n self.assertEqual (\n apidataset_dict['displayName'],\n user['nameFirst'] + ' ' + user['nameLast']\n )\n self.assertIn('sessionToken', apidataset_dict)",
"def test_login(\n config,\n):\n with requests_mock.Mocker() as m:\n sms = YesssSMS.YesssSMS(LOGIN, YESSS_PASSWD)\n m.register_uri(\n \"POST\",\n # pylint: disable=protected-access\n sms._login_url,\n status_code=302,\n # pylint: disable=protected-access\n headers={\"location\": sms._kontomanager},\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._kontomanager,\n status_code=200,\n text=\"test...\" + LOGIN + \"</a>\",\n )\n m.register_uri(\n \"GET\",\n # pylint: disable=protected-access\n sms._logout_url,\n status_code=200,\n )\n # pylint: disable=protected-access\n session, request = sms._login(requests.Session(), get_request=True)\n # pylint: disable=protected-access\n session.get(sms._logout_url)\n # pylint: disable=protected-access\n assert sms._logindata[\"login_rufnummer\"][-7:] + \"</a>\" in request.text\n # pylint: disable=protected-access\n assert request.url == sms._kontomanager",
"def testLogin(self):\n mt = self.portal.portal_membership\n self.logout()\n self.login('abc123')\n member = mt.getAuthenticatedMember()\n self.failUnlessEqual(member.id, 'abc123', msg=\"incorrect user logged in: %s\" % member)",
"def assert_login_response_before_pipeline_looks_correct(self, response):\r\n self.assertEqual(200, response.status_code)\r\n self.assertIn('Sign in with ' + self.PROVIDER_CLASS.NAME, response.content)\r\n self.assert_javascript_would_submit_login_form(False, response)\r\n self.assert_signin_button_looks_functional(response.content, pipeline.AUTH_ENTRY_LOGIN)",
"def test_get_all_event_with_login(self):\n self.client.login(email='test@test.com', password='top_secret')\n response = self.client.get('/')\n self.assertEqual(response.status_code, 200)",
"def test_login(self):\n\n with self.client as c:\n result = c.post('/login',\n data={'email': 'cat@gmail.com', 'password': 'abc'},\n follow_redirects=True\n )\n self.assertEqual(session['user_id'], 1)\n self.assertIn(\"You are logged in\", result.data)\n\n print \"DONE WITH LOGIN CHECK\""
] | [
"0.6547012",
"0.61618876",
"0.6142526",
"0.6016165",
"0.5957221",
"0.59324735",
"0.5907222",
"0.58748484",
"0.58701736",
"0.5860925",
"0.582712",
"0.5819717",
"0.57623357",
"0.5748084",
"0.57179964",
"0.56889266",
"0.56684595",
"0.56650877",
"0.5635419",
"0.5612174",
"0.55851114",
"0.5562649",
"0.5553079",
"0.5520293",
"0.5515169",
"0.55113184",
"0.5478973",
"0.54718214",
"0.5471259",
"0.5464586"
] | 0.8257416 | 0 |
Test that when we have a TPA provider which has an explicit maximum session length set, waiting longer than that between requests results in us being logged out. | def test_login_with_testshib_provider_short_session_length(self):
# Configure the provider with a 10-second timeout
self._configure_testshib_provider(max_session_length=10)
now = datetime.datetime.utcnow()
with freeze_time(now):
# Test the login flow, adding the user in the process
self._test_login()
# Wait 30 seconds; longer than the manually-set 10-second timeout
later = now + datetime.timedelta(seconds=30)
with freeze_time(later):
# Test returning as a logged in user; this method verifies that we're logged out first.
self._test_return_login(previous_session_timed_out=True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_inactive_session_timeout(self):\r\n email, password = self.STUDENT_INFO[0]\r\n self.login(email, password)\r\n\r\n # make sure we can access courseware immediately\r\n resp = self.client.get(reverse('dashboard'))\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n # then wait a bit and see if we get timed out\r\n time.sleep(2)\r\n\r\n resp = self.client.get(reverse('dashboard'))\r\n\r\n # re-request, and we should get a redirect to login page\r\n self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=' + reverse('dashboard'))",
"def test_inactive_session_timeout(self):\r\n self.create_account(self.username, self.email, self.pw)\r\n self.activate_user(self.email)\r\n\r\n self.login(self.email, self.pw)\r\n\r\n # make sure we can access courseware immediately\r\n course_url = '/course/'\r\n resp = self.client.get_html(course_url)\r\n self.assertEquals(resp.status_code, 200)\r\n\r\n # then wait a bit and see if we get timed out\r\n time.sleep(2)\r\n\r\n resp = self.client.get_html(course_url)\r\n\r\n # re-request, and we should get a redirect to login page\r\n self.assertRedirects(resp, settings.LOGIN_REDIRECT_URL + '?next=/course/')",
"def test_timeout_not_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time()\n assert not connection._timeout_exceeded(start)",
"def testSessionTimeout(self):\n\n def testTimeout(res):\n self.failUnlessEqual(res.value.args[0], b'404')\n\n def testCBTimeout(res):\n # check for terminate if we expire\n terminate = res[0].getAttribute('type',False)\n self.failUnlessEqual(terminate, 'terminate')\n\n def sendTest():\n sd = self.send()\n sd.addCallback(testCBTimeout)\n sd.addErrback(testTimeout)\n return sd\n\n def testResend(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n s = self.b.service.sessions[self.sid]\n self.failUnless(s.inactivity==2,'Wrong inactivity value')\n self.failUnless(s.wait==2, 'Wrong wait value')\n return task.deferLater(reactor, s.wait+s.inactivity+1, sendTest)\n\n def testSessionCreate(res):\n self.failUnless(res[0].name=='body', 'Wrong element')\n self.failUnless(res[0].hasAttribute('sid'),'Not session id')\n self.sid = res[0]['sid']\n\n # send and wait\n sd = self.send()\n sd.addCallback(testResend)\n return sd\n\n\n\n BOSH_XML = \"\"\"<body content='text/xml; charset=utf-8'\n hold='1'\n rid='%(rid)i'\n to='localhost'\n route='xmpp:127.0.0.1:%(server_port)i'\n ver='1.6'\n wait='2'\n ack='1'\n inactivity='2'\n xml:lang='en'\n xmlns='http://jabber.org/protocol/httpbind'/>\n \"\"\"% { \"rid\": self.rid, \"server_port\": self.server_port }\n\n return self.proxy.connect(BOSH_XML).addCallbacks(testSessionCreate)",
"def test_timeout_invalid_start():\n connection = FakeBaseConnection(session_timeout=10)\n assert not connection._timeout_exceeded(start=0)",
"def test_session_timeout_without_abort(self, exp_factory):\n exp1 = exp_factory(sid=\"s1\", timeout=1)\n exp1.start()\n spec = SequentialSpec(\"a\", \"b\", nslots=1, name=\"test\")\n mm1 = MatchMaker(spec, exp=exp1)\n group1 = mm1.match_to(\"test\")\n\n assert group1.me.role == \"a\"\n assert group1.mm.quota.nopen == 0\n assert group1.mm.quota.npending == 1\n\n time.sleep(1)\n assert exp1.session_expired\n assert not exp1.aborted\n\n exp2 = exp_factory(sid=\"s2\")\n exp2.start()\n spec = SequentialSpec(\"a\", \"b\", nslots=1, name=\"test\")\n mm2 = MatchMaker(spec, exp=exp2)\n group2 = mm2.match_to(\"test\")\n\n assert group2.me.role == \"a\"\n assert not exp2.aborted\n\n exp3 = exp_factory(sid=\"s3\")\n exp3.start()\n spec = SequentialSpec(\"a\", \"b\", nslots=1, name=\"test\")\n mm3 = MatchMaker(spec, exp=exp3)\n mm3.match_to(\"test\")\n assert exp3.aborted",
"def test_timeout_exceeded():\n connection = FakeBaseConnection(session_timeout=10)\n start = time.time() - 11\n try:\n connection._timeout_exceeded(start)\n except NetmikoTimeoutException as exc:\n assert isinstance(exc, NetmikoTimeoutException)\n return\n\n assert False",
"def test_timeout(self):\n context = Context(SSLv23_METHOD)\n context.set_timeout(1234)\n assert context.get_timeout() == 1234",
"async def test_validate_session(api_client: TestClient, coresys: CoreSys):\n with patch(\"aiohttp.web_request.BaseRequest.__getitem__\", return_value=None):\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": \"non-existing\"},\n )\n assert resp.status == 401\n\n with patch(\n \"aiohttp.web_request.BaseRequest.__getitem__\",\n return_value=coresys.homeassistant,\n ):\n resp = await api_client.post(\"/ingress/session\")\n result = await resp.json()\n\n assert \"session\" in result[\"data\"]\n session = result[\"data\"][\"session\"]\n assert session in coresys.ingress.sessions\n\n valid_time = coresys.ingress.sessions[session]\n\n resp = await api_client.post(\n \"/ingress/validate_session\",\n json={\"session\": session},\n )\n assert resp.status == 200\n assert await resp.json() == {\"result\": \"ok\", \"data\": {}}\n\n assert coresys.ingress.sessions[session] > valid_time",
"def test_wait_for_page_in_timeout(self):\n start_time = datetime.now()\n with self.assertRaises(SpdbError):\n csdb = CacheStateDB(self.config_data)\n ch = csdb.create_page_in_channel()\n\n csdb.wait_for_page_in([\"MY_TEST_KEY1\", \"MY_TEST_KEY2\"], ch, 1)\n\n assert (datetime.now() - start_time).seconds < 3",
"def test_retriable_session():\n total = 5\n backoff_factor = 0.5\n session = retriable_session(total, backoff_factor)\n assert len(session.adapters) == 2\n assert 'https://' in session.adapters\n assert 'http://' in session.adapters\n assert session.adapters['https://'] == session.adapters['http://']\n assert session.adapters['https://'].max_retries.total == total\n assert session.adapters['https://'].max_retries.backoff_factor == backoff_factor",
"def assert_timeout(self) -> None:",
"def check_correct_usage(no_datastore, cookie_only_threshold):\n def minitest_divider(test):\n logger.debug('\\n\\n' + '-'*50)\n logger.debug(test + ' (nd=%s cot=%s)' % (no_datastore, cookie_only_threshold))\n\n st = SessionTester(no_datastore=no_datastore, cookie_only_threshold=cookie_only_threshold)\n expected_num_sessions_in_db_if_db_used = lambda a,b=0 : generic_expected_num_sessions_in_db_if_db_used(st, no_datastore, cookie_only_threshold, a, b)\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('try doing nothing (no session should be started)')\n st.noop()\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('start a session with a single write')\n st.start_request()\n str(st)\n assert st.get_expiration()==0, \"no session yet => no expiration yet\"\n assert st.is_active() is False\n st['x'] = 7\n assert st.is_active() is True\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider('start another session')\n st2 = SessionTester(st=st)\n st2.start_request()\n assert not st2.is_active()\n assert st2.get('x') is None, \"shouldn't get other session's data\"\n assert not st2.is_active(), \"still shouldn't be active - nothing set yet\"\n st2['x'] = 'st2x'\n assert st2.is_active()\n st2.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider('each session should get a unique sid')\n assert st2.ss.sid != st.ss.sid\n\n minitest_divider('we should still have the values we set earlier')\n st.start_request()\n str(st)\n assert_equal(st['x'], 7)\n st.finish_request_and_check()\n st2.start_request()\n assert_equal(st2['x'], 'st2x')\n st2.finish_request_and_check()\n\n minitest_divider(\"check get session by sid, save(True), and terminate()\")\n if cookie_only_threshold == 0:\n data1 = st.ss.data\n data2 = st2.ss.data\n else:\n # data is being stored in cookie-only form => won't be in the db\n data1 = data2 = {}\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data1)\n resp = st2.get_url('/get_by_sid?sid=%s' % st2.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data2)\n expected_num_sessions_in_db_if_db_used(2)\n st.start_request()\n st['y'] = 9 # make the session dirty\n st.save(True) # force it to persist to the db even though it normally wouldn't\n st.finish_request_and_check()\n\n # now the data should be in the db\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), st.ss.data)\n expected_num_sessions_in_db_if_db_used(2, 1)\n st.start_request()\n st.terminate() # remove it from the db\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider(\"should be able to terminate() and then start a new session all in one request\")\n st.start_request()\n st['y'] = 'yy'\n assert_equal(st.get('y'), 'yy')\n st.terminate()\n assert_raises(KeyError, st.__getitem__, 'y')\n st['x'] = 7\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n st.regenerate_id()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(initial_expir, st._get_expiration(), \"expiration should not change\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n 
st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test w/new expiration time\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n new_expir = initial_expir + 120 # something new\n st.regenerate_id(expiration_ts=new_expir)\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(new_expir, st._get_expiration(), \"expiration should be what we asked for\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"check basic dictionary operations\")\n st.start_request()\n st['s'] = 'aaa'\n st['i'] = 99\n st['f'] = 4.37\n assert_equal(st.pop('s'), 'aaa')\n assert_equal(st.pop('s'), None)\n assert_equal(st.pop('s', 'nil'), 'nil')\n assert st.has_key('i')\n assert not st.has_key('s')\n assert_equal(st.get('i'), 99)\n assert_equal(st.get('ii'), None)\n assert_equal(st.get('iii', 3), 3)\n assert_equal(st.get('f'), st['f'])\n del st['f']\n assert_raises(KeyError, st.__getitem__, 'f')\n assert 'f' not in st\n assert 'i' in st\n assert_equal(st.get('x'), 7)\n st.clear()\n assert 'i' not in st\n assert 'x' not in st\n st.finish_request_and_check()\n\n minitest_divider(\"add complex data (models and objects) to the session\")\n st.start_request()\n st['model'] = make_entity(0)\n st['dict'] = dict(a='alpha', c='charlie', e='echo')\n st['list'] = ['b', 'd', 'f']\n st['set'] = set([2, 3, 5, 7, 11, 13, 17, 19])\n st['tuple'] = (7, 7, 1985)\n st.finish_request_and_check()\n st.start_request()\n st.clear()\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: basic usage\")\n st.start_request()\n st.set_quick('msg', 'mc only!')\n assert_equal('mc only!', st['msg'])\n st.finish_request_and_check()\n st.start_request()\n assert_equal('mc only!', st.pop_quick('msg'))\n assert_raises(KeyError, st.__getitem__, 'msg')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache (value will be lost if not using cookies)\")\n st.start_request()\n st.set_quick('a', 1)\n st.set_quick('b', 2)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if cookie_only_threshold > 0:\n assert_equal(st['a'], 1)\n assert_equal(st['b'], 2)\n else:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'b')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache should have no impact if another mutator is also used (and this ISNT memcache-only)\")\n st.start_request()\n st['x'] = 24\n st.set_quick('a', 1)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if no_datastore and cookie_only_threshold == 0:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'x')\n else:\n assert_equal(st['a'], 1)\n assert_equal(st['x'], 24)\n st.set_quick('msg', 'hello')\n st['z'] = 99\n st.finish_request_and_check()",
"def sessiontimeout(self) :\n\t\ttry :\n\t\t\treturn self._sessiontimeout\n\t\texcept Exception as e:\n\t\t\traise e",
"def test_timeout(self):\n # Uses a mocked version of EmailActivationTokenGenerator\n # so we can change the value of 'today'\n class Mocked(EmailActivationTokenGenerator):\n def __init__(self, today):\n self._today_val = today\n\n def _today(self):\n return self._today_val\n\n user = self.create_user()\n token_generator = EmailActivationTokenGenerator()\n token = token_generator.make_token(user)\n\n p1 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS))\n self.assertTrue(p1.check_token(user, token))\n\n p2 = Mocked(date.today() + timedelta(settings.USERS_EMAIL_CONFIRMATION_TIMEOUT_DAYS + 1))\n self.assertFalse(p2.check_token(user, token))",
"def test_max_cookie_length(self):\n storage = self.get_storage()\n response = self.get_response()\n\n for i in range(5):\n storage.add(str(i) * 900)\n unstored_messages = storage.update(response)\n\n cookie_storing = self.stored_messages_count(storage, response)\n self.assertEqual(cookie_storing, 4)\n\n self.assertEqual(len(unstored_messages), 1)\n self.assert_(unstored_messages[0].message == '0' * 900)",
"def test_pool_timeout_hw(self):\n self.test_pool_timeout()",
"def test_api_livesession_video_no_stopped_at_cache_has_timeout(\n self,\n ):\n # set the start at current time minus 30 seconds\n started = int(to_timestamp(timezone.now())) - 30\n video = VideoFactory(\n live_state=RUNNING,\n live_info={\"started_at\": str(started)},\n live_type=JITSI,\n )\n\n livesession = LiveSessionFactory(\n consumer_site=video.playlist.consumer_site,\n email=\"samia@test-fun-mooc.fr\",\n live_attendance={started + 10: {\"onStage\": 0}, started + 20: {\"muted\": 0}},\n lti_id=str(video.playlist.lti_id),\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\",\n video=video,\n )\n\n # token with right context_id and lti_user_id\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=video.playlist,\n consumer_site=str(video.playlist.consumer_site.id),\n context_id=str(video.playlist.lti_id),\n )\n livesession.refresh_from_db()\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n response_json = {\n \"count\": 1,\n \"next\": None,\n \"previous\": None,\n \"results\": [\n {\n \"id\": str(livesession.id),\n \"display_name\": \"samia@test-fun-mooc.fr\",\n \"is_registered\": False,\n \"live_attendance\": {\n str(started): {},\n str(started + 15): {\"onStage\": 0},\n str(started + 30): {\"muted\": 0},\n },\n }\n ],\n }\n self.assertEqual(response.json(), response_json)\n\n with self.assertNumQueries(0):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json(), response_json)\n\n # go over the cache limit\n new_time = timezone.now() + timedelta(\n settings.VIDEO_ATTENDANCES_CACHE_DURATION + 1\n )\n with mock.patch.object(\n timezone, \"now\", return_value=new_time\n ), mock.patch.object(time, \"time\", return_value=int(to_timestamp(new_time))):\n # we call again the same request,\n # results are not identical\n with self.assertNumQueries(3):\n response = self.client.get(\n self._get_url(video),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertNotEqual(response.json(), response_json)",
"def test_serverTimesOut(self):\n c = Clock()\n self.server.callLater = c.callLater\n\n def login():\n return self.client.login(b'testuser', b'password-test')\n\n def expireTime():\n c.advance(self.server.POSTAUTH_TIMEOUT * 2)\n\n d = self.connected.addCallback(strip(login))\n d.addCallback(strip(expireTime))\n\n # The loopback method's Deferred fires the connection is\n # closed, and the server closes the connection as a result of\n # expireTime.\n return defer.gatherResults([d, self.loopback()])",
"def test_polling_plugin_timeout(self):\n pass",
"def test_getallsessions_reliability(self):\n for _ in range(100):\n sessions = AudioUtilities.GetAllSessions()\n assert len(sessions) > 0",
"def test_expires(self):\n storage = SessionStorage(timeout=0.001)\n session1 = storage['key']\n session1['value'] = 'example'\n session2 = storage['key']\n self.assertEquals('example', session2['value'])\n\n time.sleep(0.001)\n\n session3 = storage['key']\n self.assertNotIn('value', session3)",
"def test_aio_can_login_to_web_portal(aio):",
"def TODO_testTimeout(self):\n return \"\"\"TODO: Highly dependent on hardcoded downstream timeout val\"\"\"\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n\n self.client_send('get time0\\r\\n', 0)\n self.mock_recv('get time0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # downstream timeout logic should kick in,\n # without our mock server having to send anything.\n\n self.wait(210)\n\n self.client_recv('END\\r\\n', 0)\n\n # TODO: The number of server sessions should be 0,\n # except the close might not have propagated.",
"def test_expired_pipeline(self):\n data = self.data()\n # provider is sent along request when request is made from mobile application\n data.pop(\"provider\")\n # to identify that request is made using browser\n data.update({\"social_auth_provider\": \"Google\"})\n response = self.client.post(self.url, data)\n self._assert_third_party_session_expired_error(\n response,\n \"Registration using {provider} has timed out.\".format(provider=\"Google\")\n )\n self._verify_user_existence(user_exists=False, social_link_exists=False)",
"def test_timeout(self):\n session_id = self._open_session()\n\n # No alert to begin with\n alerts = HostContactAlert.filter_by_item(self.host)\n self.assertEqual(alerts.count(), 0)\n\n time.sleep(HostState.CONTACT_TIMEOUT + HostStatePoller.POLL_INTERVAL + RABBITMQ_GRACE_PERIOD)\n\n # Should be one SESSION_TERMINATE message to AMQP with a matching session ID\n message = self._receive_one_amqp()\n self.assertDictEqual(\n message,\n {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"SESSION_TERMINATE\",\n \"plugin\": self.PLUGIN,\n \"session_seq\": None,\n \"session_id\": session_id,\n \"body\": None,\n },\n )\n\n alerts = HostContactAlert.filter_by_item(self.host)\n self.assertEqual(alerts.count(), 1)\n\n # Should be a message waiting for the agent telling it that its session was terminated\n # (timing out doesn't mean the agent is gone, it could just be experiencing network difficulties)\n # What's more, the agent doesn't necessarily *know* that it had network difficulties, e.g. if it\n # just got real slow and waited too long between GETs.\n # This has to cut both ways to be reliable:\n # * We have to tell the agent that we thought it went away, by sending a TERMINATE for sessions\n # * If the agent finds that a GET fails then it has to assume that we might have put session\n # messages in that GET, and terminate all its sessions in case one of those GET messages\n # was really a TERMINATE\n response = self._get()\n self.assertResponseOk(response)\n forwarded_messages = response.json()[\"messages\"]\n self.assertEqual(len(forwarded_messages), 1)\n self.assertDictEqual(\n forwarded_messages[0],\n {\n \"fqdn\": self.CLIENT_NAME,\n \"type\": \"SESSION_TERMINATE\",\n \"plugin\": self.PLUGIN,\n \"session_seq\": None,\n \"session_id\": None,\n \"body\": None,\n },\n )",
"def onLoginTimeOut(self):\r\n\r\n self.pros +=1\r\n self.pb_load.setValue(self.pros * 4)\r\n # login timeout error\r\n if(self.pros == 25):\r\n self.check_timer.stop()",
"def test_timeout(self):\n # Attempt connection with short timeout\n with self.assertRaises(requests.exceptions.ReadTimeout):\n a = api.InvenTreeAPI(SERVER, username=USERNAME, password=PASSWORD, timeout=0.001) # noqa: F841",
"def test_set_session():",
"def get_test_timeout(self):\n return None"
] | [
"0.67007613",
"0.6563989",
"0.6529781",
"0.63517386",
"0.6243885",
"0.6232972",
"0.6106514",
"0.6026753",
"0.5952208",
"0.59390306",
"0.5926259",
"0.58408374",
"0.58186436",
"0.5785342",
"0.57843",
"0.57781994",
"0.57300466",
"0.572285",
"0.57226133",
"0.5715555",
"0.57084894",
"0.56952065",
"0.56734866",
"0.5662905",
"0.56319326",
"0.5631651",
"0.56138325",
"0.5601006",
"0.5587145",
"0.55706555"
] | 0.77491003 | 0 |
Mock an error response when calling the OData API for user details. | def _mock_odata_api_for_error(self, odata_api_root_url, username):
def callback(request, uri, headers): # lint-amnesty, pylint: disable=unused-argument
"""
Return a 500 error when someone tries to call the URL.
"""
headers['CorrelationId'] = 'aefd38b7-c92c-445a-8c7a-487a3f0c7a9d'
headers['RequestNo'] = '[787177]' # This is the format SAPSF returns for the transaction request number
return 500, headers, 'Failure!'
fields = ','.join(SapSuccessFactorsIdentityProvider.default_field_mapping.copy())
url = '{root_url}User(userId=\'{user_id}\')?$select={fields}'.format(
root_url=odata_api_root_url,
user_id=username,
fields=fields,
)
httpretty.register_uri(httpretty.GET, url, body=callback, content_type='application/json')
return url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_response_error(self):\n r = mock.Mock(spec=requests.Response)\n r.content = \"{'normal': 'resource'}\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n\n r.status_code = 404\n self.assertRaises(exceptions.HTTPNotFound, f.user_profile_get)\n\n r.status_code = 409\n self.assertRaises(exceptions.HTTPConflict, f.user_profile_get)\n\n r.status_code = 500\n self.assertRaises(exceptions.HTTPServerError, f.user_profile_get)\n\n r.status_code = 499\n self.assertRaises(exceptions.HTTPBadRequest, f.user_profile_get)",
"def test_api_user_get(self):\n pass",
"def test_response_auth(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 401\n r.content = \"{'normal': 'resource'}\"\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n\n self.assertRaises(exceptions.HTTPUnauthorized, f.user_profile_get)\n\n r.status_code = 403\n self.assertRaises(exceptions.HTTPForbidden, f.user_profile_get)",
"def test_004_get_user_not_found(self, mock_db_query):\n mock_db_query.get.return_value = None\n\n response = self.app.get('/v1/users/0', headers={'accept': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 404)\n self.assertIn('User not found', response.get_data().decode())",
"def test_broken_odata_details(self, mock_response):\n message = {\n \"error\": {\n \"code\": \"Conflict\",\n \"message\": \"The maximum number of Free ServerFarms allowed in a Subscription is 10.\",\n \"target\": None,\n \"details\": [\n {\"message\": \"The maximum number of Free ServerFarms allowed in a Subscription is 10.\"},\n {\"code\": \"Conflict\"},\n {\n \"errorentity\": {\n \"code\": \"Conflict\",\n \"message\": \"The maximum number of Free ServerFarms allowed in a Subscription is 10.\",\n \"extendedCode\": \"59301\",\n \"messageTemplate\": \"The maximum number of {0} ServerFarms allowed in a Subscription is {1}.\",\n \"parameters\": [\"Free\", \"10\"],\n \"innerErrors\": None,\n }\n },\n ],\n \"innererror\": None,\n }\n }\n exp = HttpResponseError(response=mock_response(json.dumps(message).encode(\"utf-8\")))\n assert exp.error.code == \"Conflict\"",
"def test_search_user_fail_on_non_200_response(self) -> None:\n responses.add(responses.GET, local_app.config['SEARCHSERVICE_BASE'] + SEARCH_USER_ENDPOINT,\n json=self.mock_search_table_results, status=HTTPStatus.INTERNAL_SERVER_ERROR)\n\n with local_app.test_client() as test:\n response = test.get('/api/search/v0/user', query_string=dict(query='test', page_index='0'))\n self.assertEqual(response.status_code, HTTPStatus.INTERNAL_SERVER_ERROR)",
"def test_get_user_fail_unauthorised():\n\n client = APIClient()\n\n response = client.get(reverse(\"user-detail\"), format=\"json\")\n assert response.status_code == status.HTTP_401_UNAUTHORIZED",
"def test_003_get_user(self, mock_db_query):\n mock_db_query.get.return_value = seller1\n\n response = self.app.get('/v1/users/' + str(seller1.identity), headers={'accept': 'application/json'})\n\n print(response.get_data().decode())\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.json['first_name'], 'Lisa')",
"async def test_bad_retrieve_user_data(self, m):\n with self.assertRaises(aiohttp.web_exceptions.HTTPInternalServerError):\n await retrieve_user_data(\"bad_token\")",
"def test_user_info_without_header(self, app, auth_user):\n data = UserInfo.random()\n us_info = app.user_info.add_user_info(data=data, user_id=auth_user.uuid,\n header=None, type_response=AuthInvalidResponse)\n assert us_info.status_code == 401, \"Check status code\"\n assert us_info.data.description == ResponseText.DESCRIPTION_AUTH_ERROR\n assert us_info.data.error == ResponseText.ERROR_AUTH_TEXT\n assert us_info.data.status_code == 401, \"Check status code\"",
"def test_user_retrieve(self):\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.get_token())\n response = self.client.get(reverse(\"account:user-profile\"))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data.get('user').get('email'), \"testuser@gmail.com\")",
"def test_unavailable_introspection_endpoint(self) -> None:\n request = Mock(args={})\n request.args[b\"access_token\"] = [b\"mockAccessToken\"]\n request.requestHeaders.getRawHeaders = mock_getRawHeaders()\n\n # The introspection endpoint is returning an error.\n self.http_client.request = AsyncMock(\n return_value=FakeResponse(code=500, body=b\"Internal Server Error\")\n )\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)\n\n # The introspection endpoint request fails.\n self.http_client.request = AsyncMock(side_effect=Exception())\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)\n\n # The introspection endpoint does not return a JSON object.\n self.http_client.request = AsyncMock(\n return_value=FakeResponse.json(\n code=200, payload=[\"this is an array\", \"not an object\"]\n )\n )\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)\n\n # The introspection endpoint does not return valid JSON.\n self.http_client.request = AsyncMock(\n return_value=FakeResponse(code=200, body=b\"this is not valid JSON\")\n )\n error = self.get_failure(self.auth.get_user_by_req(request), SynapseError)\n self.assertEqual(error.value.code, 503)",
"def test_unknown_user(self):\n self.sign_in()\n response = self.client.get(reverse('backend:user_details', args=(0,)))\n self.assertEqual(response.status_code, 404)",
"def test_call_httperror(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(500)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)",
"def test_detail_requests_after_authentication(self):\n print(f'cls.user1={self.user1}')\n user_detail_url = reverse('user-detail',kwargs={'pk':1})\n self.token = Token.objects.create(user=self.user1)\n self.client.credentials(HTTP_AUTHORIZATION='Token '+self.token.key)\n response = self.client.get(user_detail_url)\n self.assertEqual(response.status_code,status.HTTP_200_OK)\n\n response_patch = self.client.patch(user_detail_url,{\n 'username': 'random_user', 'password': 'passwrodaosida123'\n })\n print(f'response_patch data={response_patch.data}')\n self.assertEqual(response_patch.data,\n {'id': 1, 'username': 'random_user', 'first_name': 'testuser', 'last_name': 'rajula', 'email': ''})\n self.assertEqual(response_patch.status_code,status.HTTP_200_OK)\n\n response = self.client.get(user_detail_url)\n self.assertEqual(response.status_code,status.HTTP_200_OK)\n self.assertEqual(response.data['username'],'random_user')",
"def test_request_users_user(self):\n response = requests.get(self.url + '/users/John')\n\n self.assertEqual(response.status_code, 200)\n self.assertIsNone(response.json())",
"def test_sees_error_message_if_username_doesnt_exist(self):\n response = self.app.post(\n \"/api/users/login\",\n data=json.dumps(\n dict(\n email=USER_DATA[\"email\"] + \"x\",\n password=USER_DATA[\"credential1\"],\n )\n ),\n content_type=\"application/json\",\n follow_redirects=True,\n )\n res = response.data.decode(\"ASCII\")\n res = json.loads(res)\n self.assertEqual(response.status_code, 401)\n self.assertEqual(\n res[\"message\"], \"Invalid email, Please try again\"\n )",
"def test_show_failure(self, mock_get):\n mock_response = Mock(name='response')\n mock_response.json.side_effect = ValueError('No JSON object could be decoded')\n mock_get.return_value = mock_response\n\n with self.assertRaises(ValueError):\n # Call the method\n self.policies.show(id=333114)",
"def test_no_user(self):\n self.request.user = None\n result = user_id_get_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'not authenticated for this request'))",
"def test_unauthenticated_user_denial(self):\n\n self.response = self.client.get(\"/api/users/users_list/\")\n self.assertEqual(self.response.status_code, status.HTTP_403_FORBIDDEN)\n self.assertEqual(\n 'Authentication credentials were not provided.', self.response.data['detail'])",
"def test_lti20_request_handler_bad_user(self):\r\n self.setup_system_xmodule_mocks_for_lti20_request_test()\r\n self.system.get_real_user = Mock(return_value=None)\r\n mock_request = self.get_signed_lti20_mock_request(self.GOOD_JSON_PUT)\r\n response = self.xmodule.lti_2_0_result_rest_handler(mock_request, \"user/abcd\")\r\n self.assertEqual(response.status_code, 404)",
"def test_get_sdb_id_invalid_response(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_sdb_id('some_id')",
"def test_fetch_user(self):\n\n self.register_user()\n\n self.assertEqual(self.fetch_user_details().status_code, 200)\n\n self.assertTrue(self.fetch_user_details(\n ).json[\"data\"][0][\"username\"] == 'Bjorn')",
"def test_get_single_user_is_missing(self):\n add_user(\"neilb\", \"neilb14@mailinator.com\")\n with self.client:\n response = self.client.get('/users/999')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])",
"def test_unauthorized_user(self):\n response_decoded_json = requests.post(URL_AUTH['url_login'], \n data=json.dumps(AUTH_PAYLOADS['payload_unauth']),\n headers=HEADER['header'])\n mes = response_decoded_json.json()\n assert 400 == response_decoded_json.status_code, \"You have BAD REQUEST\"\n assert \"User not found\" == mes, \"There is unexpected ability to login as unknown user\"",
"def test_response_ok(self):\n r = mock.Mock(spec=requests.Response)\n r.status_code = 200\n r.content = '{\"normal\": \"resource\"}'\n\n f = Fitbit(**self.client_kwargs)\n f.client._request = lambda *args, **kwargs: r\n f.user_profile_get()\n\n r.status_code = 202\n f.user_profile_get()\n\n r.status_code = 204\n f.user_profile_get()",
"def test_retrive_user(self):\n res = self.client.get(ME_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(res.data['email'], self.user.email)\n self.assertEqual(res.data['name'], self.user.name)\n self.assertNotIn('password', res.data)",
"def test_login_success_no_userinfo(self):\n UserInfo.objects.filter(user=self.user).delete()\n resp = self.client.post(\n reverse('login'),\n json.dumps({\n \"username\": self.USERNAME,\n \"password\": self.PASSWORD,\n }),\n content_type=\"application/json\"\n )\n assert resp.status_code == 200, resp.content.decode('utf-8')\n json_data = json.loads(resp.content.decode('utf-8'))\n assert json_data['name'] == self.user.email",
"def test_obtain_issues_response_error(self, mock_error, mock_url_read):\n mock_url_read.return_value = 'non-json'\n self.__report.obtain_issues(['id'], 'high')\n issues = self.__report.issues()\n self.assertIsInstance(issues, List)\n self.assertEqual(len(issues), 0)\n self.assertEqual(mock_error.call_args[0][0], \"Error loading json: %s.\")\n self.assertIsInstance(mock_error.call_args[0][1], ValueError)",
"def test_get_single_user_no_id(self):\n add_user(\"neilb\", \"neilb14@mailinator.com\")\n with self.client:\n response = self.client.get('/users/blah')\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 404)\n self.assertIn('User does not exist', data['message'])\n self.assertIn('fail', data['status'])"
] | [
"0.68724155",
"0.6683057",
"0.6637301",
"0.6578398",
"0.6559852",
"0.6474468",
"0.6464813",
"0.6425861",
"0.638332",
"0.63573205",
"0.6336519",
"0.6328632",
"0.63244635",
"0.62987155",
"0.6267484",
"0.62195075",
"0.61980534",
"0.6191649",
"0.61694646",
"0.614538",
"0.61373806",
"0.61093605",
"0.60893244",
"0.6086845",
"0.60762435",
"0.60732096",
"0.6070228",
"0.6063439",
"0.6052495",
"0.60500985"
] | 0.73752326 | 0 |
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone). | def test_register_sapsf_metadata_present_override_relevant_value(self):
value_map = {'country': {'Australia': 'NZ'}}
expected_country = 'NZ'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register_sapsf_metadata_present_override_other_value(self):\n value_map = {'country': {'United States': 'blahfake'}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='default@testshib.org'\n )\n self.USER_EMAIL = 'default@testshib.org'\n self._test_register()",
"def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"myself@testshib.org\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()",
"def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider",
"def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)",
"def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)",
"def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')",
"def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())",
"def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"",
"def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')",
"def setup_provider(self):\n pass",
"def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)",
"def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)",
"def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")",
"def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0",
"def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))",
"def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])",
"def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")",
"def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')",
"def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'",
"def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])",
"def test_update_reg_ex_config(self):\n pass",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)",
"def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)",
"def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")",
"def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')",
"def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"",
"def testMapSetdefault(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap()\n with self.assertRaises(AssertionError):\n m.setdefault(1, data_types.BuildStats())\n with self.assertRaises(AssertionError):\n m.setdefault('1', 2)\n m.setdefault('1', data_types.BuildStats())\n self.assertEqual(m, {'1': data_types.BuildStats()})"
] | [
"0.70553404",
"0.66161525",
"0.63435036",
"0.57704574",
"0.56966025",
"0.5679671",
"0.55836266",
"0.53828114",
"0.53427476",
"0.5324517",
"0.53106654",
"0.52406067",
"0.52070177",
"0.5202646",
"0.5187437",
"0.51687354",
"0.51323",
"0.5131696",
"0.5125414",
"0.51162493",
"0.5099038",
"0.50937515",
"0.50830257",
"0.5074649",
"0.5060541",
"0.5043621",
"0.50380784",
"0.5033535",
"0.50331086",
"0.5019184"
] | 0.69396245 | 1 |
Configure the provider such that it can talk to a mocked-out version of the SAP SuccessFactors API, and ensure that the data it gets that way gets passed to the registration form. Check that value mappings overrides work in cases where we override a value other than what we're looking for, and when an empty override is provided (expected behavior is that existing value maps will be left alone). | def test_register_sapsf_metadata_present_override_other_value(self):
value_map = {'country': {'United States': 'blahfake'}}
expected_country = 'AU'
provider_settings = {
'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',
'sapsf_private_key': 'fake_private_key_here',
'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',
'odata_company_id': 'NCC1701D',
'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',
}
if value_map:
provider_settings['sapsf_value_mappings'] = value_map
self._configure_testshib_provider(
identity_provider_type='sap_success_factors',
metadata_source=TESTSHIB_METADATA_URL,
other_settings=json.dumps(provider_settings)
)
self._test_register(country=expected_country) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_register_sapsf_metadata_present_override_relevant_value(self):\n value_map = {'country': {'Australia': 'NZ'}}\n expected_country = 'NZ'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_register_sapsf_metadata_present_empty_value_override(self):\n\n value_map = {'country': {}}\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n if value_map:\n provider_settings['sapsf_value_mappings'] = value_map\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_register_sapsf_with_value_default(self):\n # Mock the call to the SAP SuccessFactors OData user endpoint\n ODATA_USER_URL = (\n 'http://api.successfactors.com/odata/v2/User(userId=\\'myself\\')'\n '?$select=firstName,country,lastName,defaultFullName,email'\n )\n\n def user_callback(request, _uri, headers):\n auth_header = request.headers.get('Authorization')\n assert auth_header == 'Bearer faketoken'\n return (\n 200,\n headers,\n json.dumps({\n 'd': {\n 'username': 'jsmith',\n 'firstName': 'John',\n 'lastName': 'Smith',\n 'defaultFullName': 'John Smith',\n 'country': 'Australia'\n }\n })\n )\n\n httpretty.register_uri(httpretty.GET, ODATA_USER_URL, content_type='application/json', body=user_callback)\n\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings),\n default_email='default@testshib.org'\n )\n self.USER_EMAIL = 'default@testshib.org'\n self._test_register()",
"def test_register_insufficient_sapsf_metadata(self):\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings='{\"key_i_dont_need\":\"value_i_also_dont_need\"}',\n )\n # Because we're getting details from the assertion, fall back to the initial set of details.\n self.USER_EMAIL = \"myself@testshib.org\"\n self.USER_NAME = \"Me Myself And I\"\n self.USER_USERNAME = \"myself\"\n self._test_register()",
"def _configure_testshib_provider(self, **kwargs):\n fetch_metadata = kwargs.pop('fetch_metadata', True)\n assert_metadata_updates = kwargs.pop('assert_metadata_updates', True)\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault(\"backend_name\", \"tpa-saml\")\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n kwargs.setdefault('max_session_length', None)\n kwargs.setdefault('send_to_registration_first', False)\n kwargs.setdefault('skip_email_verification', False)\n saml_provider = self.configure_saml_provider(**kwargs) # pylint: disable=no-member\n\n if fetch_metadata:\n assert httpretty.is_enabled() # lint-amnesty, pylint: disable=no-member\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n if assert_metadata_updates:\n assert num_total == 1 # lint-amnesty, pylint: disable=no-member\n assert num_skipped == 0 # lint-amnesty, pylint: disable=no-member\n assert num_attempted == 1 # lint-amnesty, pylint: disable=no-member\n assert num_updated == 1 # lint-amnesty, pylint: disable=no-member\n assert num_failed == 0 # lint-amnesty, pylint: disable=no-member\n assert len(failure_messages) == 0 # lint-amnesty, pylint: disable=no-member\n return saml_provider",
"def test_mocked_api_set_value(self):\n c = Client()\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"value\": \"There is a product bought\"}',\n response.content)\n response = c.post(\n \"/apimock/mocked/api/account/45/?format=json\", data={\"PLN\": 100})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": \"100\"}', response.content)\n response = c.get(\"/apimock/mocked/api/account/45/?format=json\")\n self.assertIn('{\"PLN\": \"100\"}', response.content)",
"def test_mocked_api_set_new_value(self):\n c = Client()\n response = c.get(self.patch_url)\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"account\": 157}',\n response.content)\n response = c.patch(\n self.patch_url, data={\"PLN\": 20, \"EURO\": 20})\n self.assertEqual(response.status_code, 200)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)\n response = c.get(self.patch_url)\n self.assertIn('{\"PLN\": 20, \"account\": 157, \"EURO\": 20}',\n response.content)",
"def test_entities__FieldCustomization__set_value__2(address_book):\n fc = IFieldCustomization(address_book)\n field = IAddressBook['time_zone']\n fc.set_value(field, u'label', u'Default time zone value 123')\n fc.set_value(field, u'label', None)\n with pytest.raises(KeyError):\n fc.get_value(field, 'label')",
"def set_provider_defaults(provider: str, config: Dict[str, Any]) -> None:\n\n class SetProviderDefaults:\n @st.hookimpl\n def get_provider_config(self, name, params, registry):\n if name != provider:\n return None\n conf = config.copy()\n conf.update(params)\n return conf\n\n st.registry.register(SetProviderDefaults())",
"def test_configure_override_field_values(ExampleComponentClass):\n\n x = ExampleComponentClass()\n configure(x, {\"a\": 0, \"b\": \"bar\"})\n assert x.a == 0\n assert x.b == \"bar\"",
"def test_entities__NoFieldCustomization__set_value__1(root_folder):\n nfc = IFieldCustomization(root_folder)\n with pytest.raises(NotImplementedError):\n nfc.set_value(IAddressBook['time_zone'], u'label', u'foo')",
"def setup_provider(self):\n pass",
"def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)",
"def _set_result_mapping(self, provider_name, mapping):\n provider_mapping = self._result_mappings.setdefault(provider_name, {})\n if mapping:\n provider_mapping.update(mapping)\n # Ensure the reverse mapping/index is updated (for faster lookups).\n for name, index in provider_mapping.items():\n entries = self._reverse_mapping.setdefault(name, [])\n provider = _Provider(provider_name, index)\n if provider not in entries:\n entries.append(provider)",
"def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")",
"def test_configure_testshib_provider_with_cache_duration(self):\n kwargs = {}\n kwargs.setdefault('name', self.PROVIDER_NAME)\n kwargs.setdefault('enabled', True)\n kwargs.setdefault('visible', True)\n kwargs.setdefault('slug', self.PROVIDER_IDP_SLUG)\n kwargs.setdefault('entity_id', TESTSHIB_ENTITY_ID)\n kwargs.setdefault('metadata_source', TESTSHIB_METADATA_URL_WITH_CACHE_DURATION)\n kwargs.setdefault('icon_class', 'fa-university')\n kwargs.setdefault('attr_email', 'urn:oid:1.3.6.1.4.1.5923.1.1.1.6') # eduPersonPrincipalName\n self.configure_saml_provider(**kwargs)\n assert httpretty.is_enabled()\n num_total, num_skipped, num_attempted, num_updated, num_failed, failure_messages = fetch_saml_metadata()\n assert num_total == 1\n assert num_skipped == 0\n assert num_attempted == 1\n assert num_updated == 1\n assert num_failed == 0\n assert len(failure_messages) == 0",
"def test_configure():\n\n configs = DTO()\n configs.update(fake_name='fake_name', fake_id='fake_id',\n fake_number=33, fake_value='fake_value')\n application_services.configure(configs)\n\n app_configs = application_services.get_configs()\n assert all(name in app_configs for name in\n ['FAKE_NAME', 'FAKE_ID', 'FAKE_NUMBER', 'FAKE_VALUE'])\n\n assert not any(name in app_configs for name in\n ['fake_name', 'fake_id', 'fake_number', 'fake_value'])",
"def setUp(self):\n super().setUp()\n self.mock_requests(get_geocode_data=copy.deepcopy(test_constants.GET_ADDRESS_CANDIDATES_API_MOCK), \n get_data=copy.deepcopy(test_constants.GET_LIBRARY_API_MOCK))",
"def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")",
"def test_force_override(self):\n DummyLoader.register()\n try:\n DummyLoader.register(override=True)\n except ValueError:\n self.fail('Can not register if passing `override` set to `True`.')",
"def test_provided_data_takes_precedence_over_environ(self, mock_provider, monkeypatch):\n p = mock_provider()\n prefix = f'mock_'\n monkeypatch.setenv(f'{prefix}{p.provider_name}_required'.upper(), 'foo')\n rsp = p.notify(required='bar', env_prefix=prefix)\n assert rsp.status == 'success'\n assert rsp.data['required'] == 'bar'",
"def test_resolution_set_successful(self):\n # We create an instance of the panel so we can check existing values\n panel = ResolutionAdminPanel(self.env)\n\n # Check the environment initially contains the default values.\n self.assertItemsEqual(panel.get_enum_list(), self.default['resolution'])\n\n # create the section, option, and values in configuration\n self.env.config.set('ticket-field-config', 'resolution',\n ','.join(self.new['resolution']))\n\n admin_command = TicketFieldConfigCommand(self.env)\n\n # run our plugin\n admin_command.set_fields_from_config()\n\n self.assertItemsEqual(panel.get_enum_list(), self.new['resolution'])",
"def test_update_reg_ex_config(self):\n pass",
"def test_set_value_valid(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n value = 'hello'\r\n\r\n self.config.set_value(name, option, value)\r\n self.assertEqual(self.config.values[name], value)",
"def test_value_base_metadata_param(self):\n value = { 'color': 'blue' }\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, value=value)\n self.assertEqual(base_meta.value, value)",
"def test_register_sapsf_metadata_present(self):\n expected_country = 'AU'\n provider_settings = {\n 'sapsf_oauth_root_url': 'http://successfactors.com/oauth/',\n 'sapsf_private_key': 'fake_private_key_here',\n 'odata_api_root_url': 'http://api.successfactors.com/odata/v2/',\n 'odata_company_id': 'NCC1701D',\n 'odata_client_id': 'TatVotSEiCMteSNWtSOnLanCtBGwNhGB',\n }\n\n self._configure_testshib_provider(\n identity_provider_type='sap_success_factors',\n metadata_source=TESTSHIB_METADATA_URL,\n other_settings=json.dumps(provider_settings)\n )\n self._test_register(country=expected_country)",
"def test_set_dict_value_2(self):\n data_dict = {\"type\":\"replace\", \"cluster\":\"\"}\n tickets.set_dict_value(data_dict, \"cluster\", \"A\", \"B\")\n self.assertEqual(data_dict[\"cluster\"], \"B\")",
"def test_custom_provider_setting(\n config,\n):\n sms = YesssSMS.YesssSMS(\n LOGIN,\n YESSS_PASSWD,\n custom_provider={\n \"LOGIN_URL\": \"https://example.com/login\",\n \"LOGOUT_URL\": \"https://example.com/logout\",\n \"KONTOMANAGER_URL\": \"https://example.com/kontomanager\",\n \"WEBSMS_FORM_URL\": \"https://example.com/send_websms\",\n \"SEND_SMS_URL\": \"https://example.com/websms\",\n },\n )\n assert sms._login_url == \"https://example.com/login\"\n assert sms._logout_url == \"https://example.com/logout\"\n assert sms._kontomanager == \"https://example.com/kontomanager\"\n assert sms._sms_form_url == \"https://example.com/send_websms\"\n assert sms._send_sms_url == \"https://example.com/websms\"",
"def test_country_overrides(self):\n # Retrieve the registration form description\n with override_settings(REGISTRATION_EXTRA_FIELDS={\"country\": \"required\"}):\n response = self.client.get(self.url)\n self.assertHttpOK(response)\n\n self.assertContains(response, 'Kosovo')",
"def testMapSetdefault(self):\n # We only use one map type since they all share the same implementation for\n # this logic.\n m = data_types.StepBuildStatsMap()\n with self.assertRaises(AssertionError):\n m.setdefault(1, data_types.BuildStats())\n with self.assertRaises(AssertionError):\n m.setdefault('1', 2)\n m.setdefault('1', data_types.BuildStats())\n self.assertEqual(m, {'1': data_types.BuildStats()})"
] | [
"0.6940789",
"0.6617741",
"0.6345586",
"0.57743275",
"0.5698771",
"0.56804734",
"0.55835336",
"0.5381514",
"0.5341755",
"0.5321272",
"0.5309637",
"0.5242255",
"0.5205009",
"0.5200889",
"0.5184601",
"0.51705694",
"0.51325846",
"0.5132514",
"0.5125601",
"0.51126003",
"0.50996953",
"0.509318",
"0.5084065",
"0.5074553",
"0.5061046",
"0.50475365",
"0.5037039",
"0.5034832",
"0.5031776",
"0.50172603"
] | 0.70560694 | 0 |
Test case for get_chain_by_id | def test_get_chain_by_id(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_chains(self):\n pass",
"def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None",
"def test_solareclipses_id_get(self):\n pass",
"def sample_chains():\n c = chain(add.s(1, 1), add.s(1), add.s(1))\n res = c()\n print(res.get())\n print(res.parent.get())\n print(res.parent.parent.get())",
"def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]",
"def test_workflows_id_get(self):\n pass",
"def get_chain(self, chain_id):\n if self.default_model is None:\n return None\n if self.default_model.chain_dict.has_key(chain_id):\n return self.default_model.chain_dict[chain_id]\n return None",
"def test_get_case_by_id(self):\n pass",
"def test_liechtensteinsettlements_id_get(self):\n pass",
"def test_coupledmodels_id_get(self):\n pass",
"def test_prefectures_id_get(self):\n pass",
"def create_chain(self, _id, config):\n chain = Chain()\n config[\"instances\"] = self.instances\n chain.setup(_id,config)\n \n return chain",
"def get_message_chain(self, request_id):\n logger.debug('request_id = %s' % request_id)\n with self._message_chains_lock:\n if request_id in self._message_chains:\n return self._message_chains[request_id]\n else:\n #logger.debug('no message chain found for request_id %s' %\n # request_id)\n #for rid, mc in self._message_chains.iteritems():\n # logger.debug(' %s - %s' % (rid, mc))\n return None",
"def validate_chain():",
"def get_chain(self):\n return self.chain",
"def get_chain(self):\n return self.chain",
"def trip_chain(self):\n pass",
"def test_comicscreators_id_get(self):\n pass",
"def get_chain_name (chain):\n if \"-\" in chain.id:\n id_chain=chain.id[-1]\n else:\n id_chain=chain.id\n return id_chain",
"def test_workflows_id_exists_get(self):\n pass",
"def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None",
"def fetch_chain(self, certr, max_length=10):\n action = LOG_ACME_FETCH_CHAIN()\n with action.context():\n if certr.cert_chain_uri is None:\n return succeed([])\n elif max_length < 1:\n raise errors.ClientError('chain too long')\n return (\n DeferredContext(\n self._client.get(\n certr.cert_chain_uri,\n content_type=DER_CONTENT_TYPE,\n headers=Headers({b'Accept': [DER_CONTENT_TYPE]})))\n .addCallback(self._parse_certificate)\n .addCallback(\n lambda issuer:\n self.fetch_chain(issuer, max_length=max_length - 1)\n .addCallback(lambda chain: [issuer] + chain))\n .addActionFinish())",
"def test_workflows_find_one_get(self):\n pass",
"def test_christiandoctrines_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/christiandoctrines/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def test_0_3_id_inc2(self):\n\n test = self.b1.id\n test2 = self.b2.id\n test3 = self.b3.id\n self.assertEqual(test, test2 - 1)\n self.assertEqual(test3, 22)",
"def test_beneficiaries_retrieve_withoutID_that_will_fail(self):\n print('the test function name: {}'.format(sys._getframe().f_code.co_name))\n try:\n url = reverse('beneficiary:beneficiary-entity-by-id-retrieve')\n response = self.client.get(url)\n self.assertTrue(response.status_code, 200)\n except Exception as e:\n print(\"reason: \", e)",
"def test_workflows_id_team_get(self):\n pass",
"def test_get_recipe_equipment_by_id(self):\n pass",
"def diagnose_chain(chain):\n if chain[0] == 'all':\n dir = data.meta_dir_base()\n if os.path.exists(dir):\n for chain_id in os.listdir(dir):\n if utils.valid_chain_id(chain_id):\n diagnose_server(chain_id)\n else:\n consoler.info(' No published chain exist, do nothing.')\n else:\n for i in range(len(chain)):\n chain_get = chain[i].split(':')\n if len(chain_get) == 1:\n if utils.valid_chain_id(chain_get[0]):\n diagnose_server(chain_get[0])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s', chain_get[0])\n elif len(chain_get) == 2:\n if utils.valid_chain_id(chain_get[0]):\n if utils.valid_ip(chain_get[1]):\n ansible.diagnose_module(\n chain_get[1], ansible.get_dir() + '/' + chain_get[0])\n else:\n consoler.info(\n ' skip, invalid host, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid chain_id, chain_id is %s, host is %s', chain_get[0], chain_get[1])\n else:\n consoler.info(\n ' skip, invalid format, not chain_id:host, input %s', chain_get)",
"def get_object(id):"
] | [
"0.66562283",
"0.6344267",
"0.60350573",
"0.6012869",
"0.5983662",
"0.5644152",
"0.5638522",
"0.56080955",
"0.55986637",
"0.5579138",
"0.54949653",
"0.5461469",
"0.54595417",
"0.5459411",
"0.5446837",
"0.5446837",
"0.54265",
"0.53988206",
"0.5392831",
"0.5390298",
"0.5332432",
"0.53292954",
"0.53073096",
"0.52932036",
"0.5274561",
"0.5259532",
"0.52416277",
"0.52276194",
"0.5220462",
"0.52119255"
] | 0.93600637 | 0 |
Test case for get_chains | def test_get_chains(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_chain_by_id(self):\n pass",
"def get_chains (structure):\n chains=[]\n for chain in structure[0]:\n chains.append(chain)\n return chains",
"def iter_chains(self):\n if self.default_model:\n return iter(self.default_model.chain_list)\n return iter(list())",
"def iter_chains(self):\n return iter(self.chain_list)",
"def determine_chains(self, c):\n\n if isinstance(c, str):\n c = [c]\n\n chains = [None for _ in c]\n for k in self.monomer.chains.keys():\n for i, x in enumerate(c):\n if x in self.monomer.chains[k].keys():\n chains[i] = k\n\n return chains",
"def sample_chains():\n c = chain(add.s(1, 1), add.s(1), add.s(1))\n res = c()\n print(res.get())\n print(res.parent.get())\n print(res.parent.parent.get())",
"def test_acyclic_chains():\n names = ['robot', 'box1', 'box2']\n chains = lambda: FactoredRandomGeometricGraph.acyclic_chains(names)\n\n expected_number = 16\n actual_number = sum(1 for _ in chains())\n assert actual_number == expected_number, \\\n \"Expected {} chains; actual value was {}\".format(\n expected_number, actual_number)\n\n assert all(\n FactoredRandomGeometricGraph.is_acyclic(chain)\n for chain in chains())",
"def _test_chain(self, x, class_type_list, kwargs_list, y=None):\n chain, modules = self._create_chain(class_type_list, kwargs_list)\n\n chain = chain.fit(x, y=y)\n self.logger.info(\"Preprocessors chain:\\n{:}\".format(chain))\n\n x_chain = chain.forward(x)\n self.logger.info(\"Trasformed X (chain):\\n{:}\".format(x_chain))\n\n # Train the manual chain and transform\n x_manual = x\n for module in modules:\n module.fit(x_manual, y=y)\n x_manual = module.forward(x_manual)\n\n self.logger.info(\"Trasformed X (manual):\\n{:}\".format(x_manual))\n self.assert_allclose(x_chain, x_manual)\n\n return x_chain",
"def test_find_long_chains_multiple(self):\n # a -> b -> c -> ... x\n # \\________________/\n self.skill_graph = SkillGraph.load()\n old_skill = self.skill_graph.add(Skill.build('o', ''))\n last_skill = self.skill_graph.add(Skill.build('l', ''))\n self.skill_graph.add_prerequisite(last_skill.id, old_skill.id)\n chain_ids = [old_skill.id]\n for index in range(CHAINS_MIN_LENGTH):\n new_skill = self.skill_graph.add(Skill.build(str(index), ''))\n chain_ids.append(new_skill.id)\n self.skill_graph.add_prerequisite(new_skill.id, old_skill.id)\n old_skill = new_skill\n self.skill_graph.add_prerequisite(old_skill.id, last_skill.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains()\n self.assertEqual([chain_ids], result)",
"def test_find_long_chains(self):\n # a --> d --> j g h --> i\n # b _/ c --> e --> f\n self._build_sample_graph()\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n # Adding short path\n sh = self.skill_graph.add(Skill.build('h', ''))\n si = self.skill_graph.add(Skill.build('i', ''))\n self.skill_graph.add_prerequisite(si.id, sh.id)\n # Making path longer\n sj = self.skill_graph.add(Skill.build('j', ''))\n self.skill_graph.add_prerequisite(sj.id, self.sd.id)\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).long_chains(2)\n expected = [\n [self.sa.id, self.sd.id, sj.id],\n [self.sb.id, self.sd.id, sj.id],\n [self.sc.id, self.se.id, self.sf.id]\n ]\n self.assertEqual(sorted(expected), sorted(result))",
"def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)",
"def get_all_chains() -> List[ChainInfo]:\n return list(registry.chain_dict.values())",
"def chain():\n return eth_tester.EthereumTester(eth_tester.PyEVMBackend())",
"def f_chains(self) -> List[Callable[[], Chain]]:\n return [delayed_run_chain() for _ in range(self.n_chains)]",
"def test_sort_chain_multiple_structure_random():\n data = [-10, 42, 8, 64, -6, 76, 48, 8, -30, 1, 11, 92, 37, 4]\n chain = None\n for item in data:\n chain = N.Node(item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(len(data)):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with randomish values\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with randomish values\".format(len(data))",
"def iter_all_chains(self):\n for model in self.model_list:\n for chain in model.chain_list:\n yield chain",
"def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})",
"def make_chains(text_string, n):\n\n chains = {}\n\n # your code goes here\n words = text_string.split()\n #print words\n \n for i in range(len(words) - n):\n # next_word = words[i+2]\n #print \"Next Word\",next_word\n key_word_tuple = tuple(words[i:(i + n)])\n # print key_word_tuple\n #, words[i + 1])\n chains[key_word_tuple] = chains.get(key_word_tuple, [])\n # if (i + n) == (len(words) - 1):\n # next_word = words[0]\n # print \"i\", i\n # print \"BLINGGGG\"\n # print \"End of Range \",next_word, \"key word tuple \", key_word_tuple\n # # if (i + 2) < len(words):\n # else: \n next_word = words[i+n]\n # print next_word\n\n chains[key_word_tuple].append(next_word)\n \n\n \n \n \n # print chains[\"I\",\"am\"] \n # print chains\n return chains",
"def genChains(self):\n self.numMonomer = 0\n self.numBonds = 0\n self.numMols = 0\n self.numCations = 0\n self.numAnions = 0\n\n self.atomsCoords = []\n self.atomsType = []\n self.atomsCharge = []\n self.molId = []\n self.bondList = []\n \n for i in range(self.numPa + self.numPc):\n\n if i < self.numPc:\n # polycation chains, charge in LJ units of LAMMPS\n # electron charge would be 10.54 using bare LAMMPS LJ units\n # the dielectric constans of solvent is effectively taken as 111 when assign 1 to +e\n # just need to set dielectric as 0.72 in LAMMPS ot mimic water with dielectric constant 80\n self.beadCharge = 1\n self.beadType = 1 # atomic type for neutral beads in polycation chains\n self.chain = self.lenPc\n else:\n self.beadCharge = -1 # polyanion chains\n self.beadType = 3 # atomic type for neutral beads in polyanion chains\n self.chain = self.lenPa\n\n self.numMols += 1\n\n # generate the first bead of each chain randomly\n self.numMonomer += 1\n self.cxyz = np.random.rand(3) * self.box + self.lxyz\n\n self.atomsCoords.append(self.cxyz)\n #self.atomsType.append(self.beadType)\n\n # decide if the first bead is charged or not\n if self.chargeRepeat == 1:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsType.append(self.beadType)\n self.atomsCharge.append(0)\n\n self.molId.append(self.numMols)\n\n self.currpxyz = self.cxyz\n\n # follow random walk to generate the chain\n # generate the seconb bead of the chain\n self.theta, self.phi = np.random.rand(2) * np.array([np.pi, 2 * np.pi])\n self.ds = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n # decide if the second bead is charged or not\n if 2%self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n \n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = self.theta\n self.currphi = self.phi\n\n self.dstot += self.ds\n\n # generating the rest beads of the chain\n\n for k in range(3, self.chain+1):\n # only accept atoms that are beyong certain distance\n # from the atom precding the current atom in the chain\n self.theta, self.phi = np.random.rand() * np.array([np.pi - self.stiffangle, \\\n 2 * np.pi])\n self.ds1 = np.array([np.cos(self.theta), np.sin(self.theta) * np.cos(self.phi),\\\n np.sin(self.theta) * np.sin(self.phi)]) * self.segment\n\n self.reverseXZrotation()\n self.cxyz = self.currpxyz + self.ds\n\n self.numMonomer += 1\n self.atomsCoords.append(self.cxyz)\n\n if k % self.chargeRepeat == 0:\n self.atomsCharge.append(self.beadCharge)\n self.atomsType.append(self.beadType + 1)\n if i < self.numPc:\n self.numCations += 1\n else:\n self.numAnions += 1\n else:\n self.atomsCharge.append(0)\n self.atomsType.append(self.beadType)\n\n self.molId.append(self.numMols)\n self.numBonds += 1\n self.bondList.append([self.numMonomer - 1, self.numMonomer])\n\n self.currpxyz = self.cxyz\n\n self.currtheta = np.arccos(self.ds[0]/self.segment)\n if self.ds[2] > 0:\n self.currphi = 
np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n else:\n self.currphi = 2*np.pi - np.arccos(self.ds[1]/self.segment/np.sin(self.currtheta))\n\n self.dstot += self.ds\n\n print \"%d beads are generated.\\n\" % self.numMonomer \n assert self.numMonomer == self.numPc * self.lenPc + self.numPa * self.lenPa, \\\n \"The number of monomers in chains is wrong!\\n\"\n assert self.numCations == int(np.floor(self.lenPc * self.chargeFraction)*self.numPc), \\\n \"The number of positively charged beads is wrong!\\n\"\n assert self.numAnions == int(np.floor(self.lenPa * self.chargeFraction)*self.numPa), \\\n \"The number of negatively charged beads is wrong!\\n\"",
"def test_sort_chain_multiple_structure_increasing():\n n = 11\n data = range(n)\n chain = None\n for item in data:\n chain = N.Node(n-item, chain)\n\n result = A8.sort_chain(chain)\n\n walker = result\n for i in range(n):\n assert walker is not None, \"sort_chain returned chain of length {} given chain with values increasing\".format(i)\n walker = walker.next\n\n assert walker is None, \"sort_chain returned chain longer than length {} given chain with values increasing\".format(n)",
"def test_required_deleted_chain_gets_stubbed(self):\n self.txn.store_delete(\"felix-b\")\n self.assertEqual(self.txn.affected_chains, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_stub_out, set([\"felix-b\"]))\n self.assertEqual(self.txn.chains_to_delete, set())\n self.assertEqual(self.txn.referenced_chains,\n set([\"felix-b\", \"felix-stub\"]))\n self.assertEqual(\n self.txn.prog_chains,\n {\n \"felix-a\": [],\n \"felix-c\": [],\n })\n self.assertEqual(self.txn.required_chns,\n {\"felix-a\": set([\"felix-b\", \"felix-stub\"])})\n self.assertEqual(self.txn.requiring_chns,\n {\"felix-b\": set([\"felix-a\"]),\n \"felix-stub\": set([\"felix-a\"])})",
"def numChains(self):\n\n\t\treturn len(self.chain)",
"def count_chains(self):\n return len(self.chain_list)",
"def chains(self, model_num = 0):\n return [c for c in self.struct]",
"def test_markow_chain():\n amount = len(markow_chain(SNULL, TIMESTEPS, PROBABILITYMATRIX))\n assert TIMESTEPS == amount",
"def test_chain(self):\n self._test_chain(self.array_dense,\n ['min-max', 'pca', 'min-max', 'rbf', 'svm'],\n [{'feature_range': (-5, 5)}, {},\n {'feature_range': (0, 1)}, {}, {}],\n y=self.labels)",
"def make_chains(text_string):\n\n chains = {}\n words = text_string.split()\n\n for i in range(len(words) - 2):\n key = (words[i], words[i + 1])\n value = words[i + 2]\n #print key, value\n\n if key not in chains:\n chains[key] = []\n chains[key].append(value)\n\n # print chains\n return chains",
"def make_chains(input_text, n):\n\n # contents = open_and_read_file(sys.argv[1])\n\n chains = {}\n\n words = input_text.split()\n\n for i in range(len(words) - 2):\n a, b = words[i], words[i+1]\n pair = (a, b,)\n\n if pair in chains:\n chains[pair] += [words[i+2]]\n else:\n chains[pair] = [words[i+2]]\n # if chains.get(pair, False):\n # c = words[i + 2]\n # chains[pair].append(c)\n # # how can we have an empty list as a value and not reset?\n # else:\n # c = words[i + 2]\n # chains[pair] = []\n # chains[pair].append(c)\n\n # print \"C equals: \", c\n # chains[pair].append(c)\n # else add \"\" to dictionary\n return chains",
"def test_post_chain_search(self):\n pass",
"def decimateChains(chains, max_err = 200):\n newchains = []\n for chain in chains:\n vs = chain - chain[0]\n angles = np.arctan2(vs[:,1], vs[:,0])\n vas = angles - angles[-1]\n ds = np.linalg.norm(vs, axis=1)\n errs = np.abs(np.sin(vas) * ds)\n id_far = np.argmax(errs)\n if errs[id_far] > max_err:\n newchains += decimateChains([chain[:id_far+1], chain[id_far:]], max_err)\n else:\n newchains.append(chain)\n return newchains"
] | [
"0.6914287",
"0.67781395",
"0.6644035",
"0.6538033",
"0.652312",
"0.6489876",
"0.64195883",
"0.63450104",
"0.62671566",
"0.61932814",
"0.60854757",
"0.6066701",
"0.605091",
"0.6029899",
"0.6004666",
"0.60018355",
"0.5970353",
"0.59533113",
"0.5910838",
"0.5885802",
"0.58681154",
"0.5850139",
"0.5832499",
"0.5813991",
"0.5785004",
"0.5756634",
"0.5742945",
"0.57267576",
"0.571478",
"0.5712835"
] | 0.94273794 | 0 |
Test case for post_chain | def test_post_chain(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_post_chain_search(self):\n pass",
"def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)",
"def test_post_foods(self):\n pass",
"def test_post_process(self):\n self.executed = False\n\n post_procs = pyamf.POST_DECODE_PROCESSORS[:]\n\n def restore_post_procs():\n pyamf.POST_DECODE_PROCESSORS = post_procs\n\n self.addCleanup(restore_post_procs)\n pyamf.POST_DECODE_PROCESSORS = []\n\n def postprocess(payload, context):\n self.assertEqual(payload, u'foo')\n self.assertEqual(context, {})\n\n self.executed = True\n\n return payload\n\n pyamf.add_post_decode_processor(postprocess)\n\n # setup complete\n bytes = pyamf.encode(u'foo', encoding=pyamf.AMF3).getvalue()\n\n self.decoder.send(bytes)\n ret = next(self.decoder)\n\n self.assertTrue(self.executed)\n self.assertEqual(ret, u'foo')",
"def test_get_chains(self):\n pass",
"def test_rewrite_chains_stub(self):\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n )\n self.step_actor(self.ipt)\n self.assertEqual(self.stub.chains_contents,\n {\"foo\": [\"--append foo --jump bar\"],\n 'bar': [MISSING_CHAIN_DROP % \"bar\"]})",
"def test_posthardwares(self):\n pass",
"def test_tail_call(self):",
"def test_tranform_chain() -> None:\n transform_chain = TransformChain(\n input_variables=[\"first_name\", \"last_name\"],\n output_variables=[\"greeting\"],\n transform=dummy_transform,\n )\n input_dict = {\"first_name\": \"Leroy\", \"last_name\": \"Jenkins\"}\n response = transform_chain(input_dict)\n expected_response = {\"greeting\": \"Leroy Jenkins says hello\"}\n assert response == expected_response",
"def validate_chain():",
"def _test_chain(self, x, class_type_list, kwargs_list, y=None):\n chain, modules = self._create_chain(class_type_list, kwargs_list)\n\n chain = chain.fit(x, y=y)\n self.logger.info(\"Preprocessors chain:\\n{:}\".format(chain))\n\n x_chain = chain.forward(x)\n self.logger.info(\"Trasformed X (chain):\\n{:}\".format(x_chain))\n\n # Train the manual chain and transform\n x_manual = x\n for module in modules:\n module.fit(x_manual, y=y)\n x_manual = module.forward(x_manual)\n\n self.logger.info(\"Trasformed X (manual):\\n{:}\".format(x_manual))\n self.assert_allclose(x_chain, x_manual)\n\n return x_chain",
"def test_post_order_traversal(our_bsts):\n bpost = []\n for i in our_bsts[0].post_order():\n bpost.append(i)\n assert bpost == our_bsts[6]",
"def _post(self, *args, **kwargs):\n return _TestA_swig.my_qpsk_demod_cb_sptr__post(self, *args, **kwargs)",
"def _post(self, *args, **kwargs):\n return _TestA_swig.cleanslate_sptr__post(self, *args, **kwargs)",
"def after_test(self, func, *args, **kwargs):\n pass",
"def chain_cmd(ctx):\n pass",
"def forward_test(self, *args, **kwargs):\n pass",
"def test_check_duplication_entry_at_restoring_two_chain(self):\n ref_entity = Entity.objects.create(name=\"ReferredEntity\", created_user=self._user)\n ref_entries = [\n Entry.objects.create(name=\"ref-%d\" % i, created_user=self._user, schema=ref_entity)\n for i in range(3)\n ]\n ref_entity_2 = Entity.objects.create(name=\"ReferredEntity2\", created_user=self._user)\n ref_entries_2 = [\n Entry.objects.create(name=\"ref2-%d\" % i, created_user=self._user, schema=ref_entity_2)\n for i in range(3)\n ]\n\n # initialize EntityAttrs\n attr_info = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries[1:],\n },\n }\n attr_info_2 = {\n \"obj\": {\"type\": AttrTypeValue[\"object\"], \"value\": ref_entries_2[0]},\n \"arr_obj\": {\n \"type\": AttrTypeValue[\"array_object\"],\n \"value\": ref_entries_2[1:],\n },\n }\n for attr_name, info in attr_info.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=self._entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity)\n\n self._entity.attrs.add(attr)\n\n for attr_name, info in attr_info_2.items():\n # create EntityAttr object with is_delete_in_chain object\n attr = EntityAttr.objects.create(\n name=attr_name,\n type=info[\"type\"],\n is_delete_in_chain=True,\n created_user=self._user,\n parent_entity=ref_entity,\n )\n\n if info[\"type\"] & AttrTypeValue[\"object\"]:\n attr.referral.add(ref_entity_2)\n\n ref_entity.attrs.add(attr)\n\n # initialize target entry\n entry = Entry.objects.create(name=\"entry\", schema=self._entity, created_user=self._user)\n entry.complement_attrs(self._user)\n\n for attr_name, info in attr_info.items():\n attr = entry.attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n ref_entries[0].complement_attrs(self._user)\n for attr_name, info in attr_info_2.items():\n attr = ref_entries[0].attrs.get(schema__name=attr_name)\n attr.add_value(self._user, info[\"value\"])\n\n # delete target entry at first\n entry.delete()\n # sync referral entries from database\n [x.refresh_from_db() for x in ref_entries]\n [x.refresh_from_db() for x in ref_entries_2]\n\n self.assertFalse(ref_entries_2[1].is_active)\n\n # create same name entry\n Entry.objects.create(name=\"ref2-1\", created_user=self._user, schema=ref_entity_2)\n\n # check duplicate entry\n ret = entry.check_duplication_entry_at_restoring(entry_chain=[])\n self.assertTrue(ret)",
"def test_post(self):\n pass",
"def test_postorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.post_order(lambda x: testlist.append(x))\n assert str(testlist) == str([1, 2, 3, 4, 0])",
"def test_post_no_operation(capsys):\n a = K()\n with pytest.raises(TypeError):\n assert a.post_order()",
"def test_workflows_post(self):\n pass",
"def _postprocess(self):",
"def test_process_postpay_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'ACCEPT',\r\n 'ccAuthReply_amount': '0.00'\r\n }\r\n result = process_postpay_callback(params)\r\n self.assertTrue(result['success'])\r\n self.assertEqual(result['order'], order1)\r\n order1 = Order.objects.get(id=order1.id) # reload from DB to capture side-effect of process_postpay_callback\r\n self.assertEqual(order1.status, 'purchased')\r\n self.assertFalse(result['error_html'])",
"def test_issue_post_issue_reaction(self):\n pass",
"def test_user_actions_post(self):\n pass",
"def post_processor(self):",
"def _post_hooks(self):",
"def test_fall_through(self):\n dec = self.actions(self.mock_model, [\"doit\"], fall_through=True)\n req = self.req(\"post\", \"/the/url\", data={\"other\": \"thing\"})\n\n res = self.view(req, decorator=dec)\n\n self.assertEqual(self.mock_model._base_manager.get.call_count, 0)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(res.request.method, \"POST\")\n self.assertEqual(res.request.POST[\"other\"], \"thing\")",
"def testPost(self):\n self.handler.handle = self.mox.CreateMockAnything()\n self.handler.handle('POST', '/my_service', 'method1')\n self.handler.handle('POST', '/my_other_service', 'method2')\n\n self.mox.ReplayAll()\n\n self.handler.post('/my_service', 'method1')\n self.handler.post('/my_other_service', 'method2')\n\n self.mox.VerifyAll()"
] | [
"0.72168523",
"0.65788776",
"0.6171648",
"0.6129438",
"0.61099774",
"0.61021096",
"0.60098636",
"0.5998518",
"0.59712416",
"0.5869386",
"0.58414704",
"0.5824033",
"0.5820647",
"0.5788468",
"0.5751005",
"0.5730567",
"0.57289803",
"0.57287365",
"0.5715039",
"0.5704063",
"0.5691138",
"0.56803733",
"0.5641229",
"0.5613563",
"0.5599987",
"0.55970144",
"0.5593933",
"0.55780613",
"0.55520594",
"0.5549403"
] | 0.9139945 | 0 |
Test case for post_chain_search | def test_post_chain_search(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_post_chain(self):\n pass",
"def test_post_foods_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search(self):\n pass",
"def test_search_systems_post(self):\n pass",
"def test_search_organizations_post(self):\n pass",
"def test_search(self):\n d = self._search()\n self._response([2, 5, 10])\n self.assertEqual(self.successResultOf(d), [2, 5, 10])",
"def test_get_chains(self):\n pass",
"def test_analyze_a_recipe_search_query(self):\n pass",
"def test_search_recipes(self):\n pass",
"def test_autocomplete_recipe_search(self):\n pass",
"def processSearchResult(self):",
"def test_process_searchbox_with_mock(self):\n\n result = self.client.get('/process_searchbox', data={'zipcode': '94043', 'cuisine': 'indian'})\n self.assertIn(b\"Dosa Paratha\", result.data)",
"def test_act_is_searching(self):\n # setup\n self.strategy._is_searching = True\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(1)\n has_attributes, error_str = self.message_has_attributes(\n actual_message=self.get_message_from_outbox(),\n message_type=OefSearchMessage,\n performative=OefSearchMessage.Performative.SEARCH_SERVICES,\n to=self.skill.skill_context.search_service_address,\n sender=str(self.skill.public_id),\n query=self.skill.skill_context.strategy.get_location_and_service_query(),\n )\n assert has_attributes, error_str",
"def test_search_systemusers_post(self):\n pass",
"def search(self, *args, **kwargs):",
"def test_rewrite_chains_cover(self):\n cb = Mock()\n self.ipt.rewrite_chains(\n {\"foo\": [\"--append foo --jump bar\"]},\n {\"foo\": set([\"bar\"])},\n async=True,\n callback=cb,\n )\n self.step_actor(self.ipt)\n cb.assert_called_once_with(None)",
"def test_act_not_is_searching(self):\n # setup\n self.strategy._is_searching = False\n\n # operation\n self.search_behaviour.act()\n\n # after\n self.assert_quantity_in_outbox(0)",
"def test_determine_search_method(): # ***Incomplete test\n ##########################\n # Arrange.\n query_exten = \"query_exten\"\n db_exten = \"db_exten\"\n\n ##########################\n # Act.\n #x = determine_search_method(query_exten,\n #\t\tdb_exten)\n\n ##########################\n # Assert.\n assert True == True # ***Temporary.",
"def test_index_page_post(self):\n tester = app.test_client(self)\n response = tester.post('/',data = dict(search_address = \"paris\"), follow_redirects=True)\n print(\"post response code : \",response.status_code)\n self.assertEqual(response.status_code,200)\n assert b\"Moscow Ring Road Distance Finder\" not in response.data",
"def test_match_table_post(self):\n pass",
"def test_search_test_search_returns_correct_menu(self):\n # create some db records\n dataset = self.create_mixed_test_data()\n test_search_string = 'bravo'\n\n with patch('builtins.input', side_effect=test_search_string):\n result = self.menu.search_text_search()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)",
"def test_search(self):\n from rubber import resource\n requestmock = RequestMock()\n resource.requests = requestmock\n\n q = {'query': {'term': {'user': 'kimchy'}}}\n self.Article.elasticsearch.search(q, toto='titi')\n\n self.assertEquals(1, len(requestmock.stack))\n self.assertEquals('http://example.com:9200/tests/article/_search', requestmock.stack[0]['url'])\n self.assertEquals('GET', requestmock.stack[0]['method'])\n self.assertEquals('titi', requestmock.stack[0]['kwargs']['toto'])\n from rubber.instanceutils import data_to_json\n self.assertEquals(data_to_json(q), requestmock.stack[0]['kwargs']['data'])\n\n self.Article.elasticsearch.mapping.put({'some': 'mapping'}, toto='titi')\n\n self.assertEquals(2, len(requestmock.stack))\n self.assertEquals('http://example.com:9200/tests/article/_mapping', requestmock.stack[1]['url'])\n self.assertEquals('PUT', requestmock.stack[1]['method'])\n self.assertEquals('titi', requestmock.stack[1]['kwargs']['toto'])",
"def test_candidates_retrieve(self):\n pass",
"def test_enable_case_search_reindex(self, fake_factor):\n enable_case_search(self.domain)\n self.assertEqual(fake_factor.call_args, call(domain=self.domain))\n self.assertTrue(fake_factor().build.called)\n self.assertTrue(fake_factor().build().reindex.called)",
"def test_search(self):\n with unittest.mock.patch('builtins.input', return_value='a'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='b'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='c'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='d'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='e'):\n good = self.ec.search()\n self.assertTrue(good)\n with unittest.mock.patch('builtins.input', return_value='q'):\n good = self.ec.search()\n self.assertFalse(good)",
"def test_search_multiresults(self):\n self.assertEquals(len(self.t['Scrubs'].search('my first')) >= 3, True)",
"def test_get_foods_search(self):\n pass",
"def test_absorbs_naked_a_search(self):\n invenio_search = \"author:ellis\"\n naked_search = \"a ellis\"\n self._compare_searches(invenio_search, naked_search)"
] | [
"0.7415727",
"0.6913196",
"0.66014063",
"0.66014063",
"0.66014063",
"0.65811795",
"0.6300726",
"0.615566",
"0.5992487",
"0.5984323",
"0.59211963",
"0.58675545",
"0.58613753",
"0.58335793",
"0.58033735",
"0.5775473",
"0.57752",
"0.5766603",
"0.57302696",
"0.57134694",
"0.5695927",
"0.5666814",
"0.5652642",
"0.5644926",
"0.5625471",
"0.56222975",
"0.562001",
"0.5603048",
"0.5600581",
"0.55873066"
] | 0.9217483 | 0 |
Build a networkx graph object from variables and relations. | def as_networkx_graph(variables, relations):
graph = nx.Graph()
# One node for each variable
graph.add_nodes_from([v.name for v in variables])
for r in relations:
for p in all_pairs([e.name for e in r.dimensions]):
graph.add_edge(*p)
return graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_networkx_bipartite_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables], bipartite=0)\n graph.add_nodes_from([r.name for r in relations], bipartite=1)\n\n for r in relations:\n for e in r.dimensions:\n graph.add_edge(r.name, e.name)\n return graph",
"def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph",
"def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]",
"def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G",
"def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph",
"def build_graph(self):\n pass",
"def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g",
"def _build_graph(self):\n pass",
"def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def build_graph(graph_dict): \n #make networkX graph\n G = nx.Graph()\n G.add_nodes_from(graph_dict.keys())\n for key in graph_dict:\n for i in range(len(graph_dict[key])):\n G.add_edge(key,graph_dict[key][i])\n return G",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict",
"def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g",
"def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph",
"def build_graph(self):\n raise NotImplementedError",
"def _get_graph(nodes: Nodes, edges: np.ndarray):\n\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph",
"def get_graph(self) -> nx.classes.graph.Graph:\n G = nx.Graph()\n # add nodes\n G.add_nodes_from([(room, props) for room, props in self.get_rooms_with_properties().items()])\n # add edges\n G.add_edges_from(self.get_edges_with_properties())\n return G",
"def _build_graph(show=False):\n global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()",
"def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])",
"def build_graph(self):\n self.graph = tf.Graph()\n temp_connections = self.connections\n with self.graph.as_default():\n operations = {}\n\n # create Variables for input vertices\n for neuron_id in self.input_neurons:\n self.inputs[neuron_id] = tf.get_variable(name=str(neuron_id), shape=(),\n initializer=tf.zeros_initializer)\n deletion = []\n while len(temp_connections) > 0:\n for neuron_id in deletion:\n temp_connections.pop(neuron_id, None)\n deletion = []\n keys = list(temp_connections)\n random.shuffle(keys)\n # create input & output vertices\n for neuron_id in temp_connections:\n input_neuron_ids = temp_connections[neuron_id]\n if self.check(input_neuron_ids, operations):\n # weights\n v_weights = tf.constant(self.weights[neuron_id])\n # input vertices\n v_inputs = []\n\n for input_neuron_id in input_neuron_ids:\n if self.is_input_neuron(input_neuron_id):\n vertex = self.inputs[input_neuron_id]\n else:\n vertex = operations[input_neuron_id]\n\n v_inputs.append(vertex)\n\n deletion.append(neuron_id)\n\n # multiply weights and inputs\n mul = tf.multiply(v_inputs, v_weights, str(neuron_id))\n # sum multiplied values\n sum = tf.reduce_sum(mul, name='sum_' + str(neuron_id))\n # apply activation function\n if self.is_output_neuron(neuron_id):\n activation = tf.sigmoid(sum, name=\"output\")\n else:\n activation = tf.nn.leaky_relu(sum, alpha=0.2, name=\"relu_\" + str(neuron_id))\n\n operations[neuron_id] = activation\n if self.is_output_neuron(neuron_id):\n self.output = activation\n return self.graph, self.inputs, self.output",
"def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new",
"def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph",
"def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph",
"def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G",
"def _construct_graph(self):\n raise NotImplementedError",
"def build_graph(friends: list, mutuals: dict) -> nx.classes.graph.Graph:\n friends_ids = [friend['id'] for friend in friends]\n G = nx.Graph()\n G.add_nodes_from(range(len(friends_ids)))\n\n for idx in tqdm(friends_ids):\n node_id = friends_ids.index(idx)\n G.nodes[node_id]['vk_id'] = idx\n G.nodes[node_id]['first_name'] = friends[node_id]['first_name']\n G.nodes[node_id]['last_name'] = friends[node_id]['last_name']\n G.nodes[node_id]['gender'] = friends[node_id]['sex']\n G.nodes[node_id]['relation'] = friends[node_id].get('relation')\n G.nodes[node_id]['city'] = friends[node_id].get('city', {}).get('title')\n G.nodes[node_id]['country'] = friends[node_id].get('country', {}).get('title')\n G.nodes[node_id]['schools'] = friends[node_id].get('schools')\n G.nodes[node_id]['universities'] = friends[node_id].get('universities')\n G.nodes[node_id]['career'] = friends[node_id].get('career')\n idx_mutuals = mutuals.get(idx)\n if idx_mutuals != None:\n edges = [(node_id, friends_ids.index(friend_id)) for friend_id in idx_mutuals]\n G.add_edges_from(edges)\n\n return G",
"def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]",
"def build_graph(self, nodes, edges):\n\n log.info(\"Building Graph with [%s] nodes and [%s] edges\" % ('NOT_IMPLEMENTED', 'NOT_IMPLEMENTED'))\n\n for node, node_type in nodes.items():\n self.graph.node(node, node.replace('tmp_git_repo/', '', 1), color=NODE_COLORS[node_type])\n\n for left_edge, right_edges in edges.items():\n for right_edge in right_edges:\n self.graph.edge(left_edge, right_edge.import_path, label=right_edge.import_name)",
"def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)"
] | [
"0.73159355",
"0.67514896",
"0.6639457",
"0.663262",
"0.6631219",
"0.6617338",
"0.65870404",
"0.6585843",
"0.65561634",
"0.64985657",
"0.64818686",
"0.64802456",
"0.64401877",
"0.64243275",
"0.64049304",
"0.6382079",
"0.63409054",
"0.6319503",
"0.6298616",
"0.6286521",
"0.62707406",
"0.6231678",
"0.6230883",
"0.6181396",
"0.6176297",
"0.61700135",
"0.6168684",
"0.61603206",
"0.6153772",
"0.6134785"
] | 0.81062376 | 0 |
Build a networkx graph object from variables and relations. | def as_networkx_bipartite_graph(variables, relations):
graph = nx.Graph()
# One node for each variable
graph.add_nodes_from([v.name for v in variables], bipartite=0)
graph.add_nodes_from([r.name for r in relations], bipartite=1)
for r in relations:
for e in r.dimensions:
graph.add_edge(r.name, e.name)
return graph | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph",
"def initialize_graph(compound_relations, relation_types):\n graph = nx.DiGraph()\n for compound, targets in compound_relations.items():\n for target, relation in targets.items():\n if relation in relation_types:\n graph.add_edge(compound, target)\n return graph",
"def build_graph(self):\n for node in self.graph.nodes():\n self.c2py[node] = PyNode(node)\n for _input in node.inputs():\n if _input not in self.c2py:\n self.c2py[_input] = PyNode(_input, True)\n if _input in self.forward_edge:\n self.forward_edge[_input].append(node)\n else:\n self.forward_edge[_input] = [node]\n for output in node.outputs():\n if output not in self.c2py:\n self.c2py[output] = PyNode(output, True)\n if node in self.forward_edge:\n self.forward_edge[node].append(output)\n else:\n self.forward_edge[node] = [output]",
"def build_graph(edges):\n \n G = nx.MultiGraph()\n G.add_edges_from(edges)\n return G",
"def parse_graph(self):\n\t\tnx_graph = nx.Graph()\n\t\tfor node in self.vertices:\n\t\t\tnx_graph.add_node(node)\n\n\t\tfor edge in self.edges:\n\t\t\tnode1, node2, weight = edge\n\t\t\tnx_graph.add_edge(node1, node2, weight=weight)\n\n\t\treturn nx_graph",
"def build_graph(self):\n pass",
"def to_networkx(self):\n g = nx.Graph()\n for v in self.vs.values():\n g.add_node(v)\n for v in self.fs:\n g.add_node(v)\n for u in v.neighbors:\n g.add_edge(v, u)\n return g",
"def _build_graph(self):\n pass",
"def build_node_graph(self):\n G = pgv.AGraph(strict=False, directed=True)\n temp_dict = defaultdict(int) #key - from_to_ip, val - counter\n\n for i, ip in enumerate(self.node_graph_dict.keys()):\n G.add_node(ip, shape='rect', label='%d' % i)\n logger.info(\"All nodes added\")\n\n for ip, droplist in self.node_graph_dict.iteritems():\n for gnid, dropids in droplist:\n for did in dropids:\n tip = self.gnid_ip_dict[self.oid_gnid_dict[did]]\n k = '{0}_{1}'.format(ip, tip)\n temp_dict[k] += 1\n\n for k, v in temp_dict.iteritems():\n ks = k.split('_')\n G.add_edge(ks[0], ks[1], weight=v)\n\n return G",
"def build_graph(self):\n self.__create_placeholders()\n self.__create_encoder()\n self.__create_latent()\n self.__create_decoder()\n self.__create_loss()\n self.__create_generate()\n self.__create_reconstruct()\n self.__create_optimizer()\n self.__create_summary()",
"def build_graph(graph_dict): \n #make networkX graph\n G = nx.Graph()\n G.add_nodes_from(graph_dict.keys())\n for key in graph_dict:\n for i in range(len(graph_dict[key])):\n G.add_edge(key,graph_dict[key][i])\n return G",
"def build_graph(self):\n\t\tself._create_placeholders()\n\t\tself._create_embedding()\n\t\tself._create_recurrent_layers()\n\t\tself._create_de_embedding()\n\t\tself._create_loss()\n\t\tself._create_optimizer()\n\t\tself._create_summaries()",
"def build_graph():\n file = open(\"../data/data.json\", \"r\")\n data = json.load(file)\n node_dict = {}\n for id in data:\n node_dict[id] = Node(data[id][\"name\"], data[id][\"product\"], data[id][\"production_volume\"])\n for id in data:\n current_node = node_dict[id]\n for costumer_id in data[id][\"costumers\"]:\n current_node.costumers.append(node_dict[str(costumer_id)])\n current_node.out_edge_capacity_drop[node_dict[str(costumer_id)].name] = 0\n for supplier_id in data[id][\"suppliers\"]:\n current_node.suppliers.append(node_dict[str(supplier_id)])\n current_node.in_edge_capacity_drop[node_dict[str(supplier_id)].name] = 0\n return node_dict",
"def example_graph():\n g = nx.Graph()\n g.add_edges_from([('A', 'B'), ('A', 'C'), ('B', 'C'), ('B', 'D'), ('D', 'E'), ('D', 'F'), ('D', 'G'), ('E', 'F'), ('G', 'F')])\n return g",
"def _create_nx_graph(self):\n #_graph = nx.Graph()\n graph = nx.DiGraph()\n for name, lemma in self._lemmas_info.get_parent_lemmas():\n added_children = []\n for child_n in lemma.evidence_lemmas:\n child_node = str(child_n)\n if not self._should_be_filtered( added_children, child_node ):\n added_children.append( child_node )\n \n graph.add_node( name ) # it's OK if it exists from the previous iteration\n graph.add_node( child_node )\n # lemma1 because lemma2, means that lemma2 -> lemma1\n graph.add_edge( child_node, name )\n \n self._append_source_and_target( graph )\n return graph",
"def build_graph(self):\n raise NotImplementedError",
"def _get_graph(nodes: Nodes, edges: np.ndarray):\n\n graph = nx.Graph()\n graph.add_nodes_from(nodes['id'])\n attrs = nodes.set_index('id').to_dict('index')\n nx.set_node_attributes(graph, attrs)\n graph.add_edges_from(edges)\n\n return graph",
"def get_graph(self) -> nx.classes.graph.Graph:\n G = nx.Graph()\n # add nodes\n G.add_nodes_from([(room, props) for room, props in self.get_rooms_with_properties().items()])\n # add edges\n G.add_edges_from(self.get_edges_with_properties())\n return G",
"def _build_graph(show=False):\n global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()",
"def _init_graph(self):\n self.G = nx.Graph()\n self.G.add_nodes_from([1,2,3,4,5])\n self.G.add_edges_from([(1,2),(2,3),(2,4)\\\n ,(2,5),(3,4),(4,5)])",
"def build_graph(self):\n self.graph = tf.Graph()\n temp_connections = self.connections\n with self.graph.as_default():\n operations = {}\n\n # create Variables for input vertices\n for neuron_id in self.input_neurons:\n self.inputs[neuron_id] = tf.get_variable(name=str(neuron_id), shape=(),\n initializer=tf.zeros_initializer)\n deletion = []\n while len(temp_connections) > 0:\n for neuron_id in deletion:\n temp_connections.pop(neuron_id, None)\n deletion = []\n keys = list(temp_connections)\n random.shuffle(keys)\n # create input & output vertices\n for neuron_id in temp_connections:\n input_neuron_ids = temp_connections[neuron_id]\n if self.check(input_neuron_ids, operations):\n # weights\n v_weights = tf.constant(self.weights[neuron_id])\n # input vertices\n v_inputs = []\n\n for input_neuron_id in input_neuron_ids:\n if self.is_input_neuron(input_neuron_id):\n vertex = self.inputs[input_neuron_id]\n else:\n vertex = operations[input_neuron_id]\n\n v_inputs.append(vertex)\n\n deletion.append(neuron_id)\n\n # multiply weights and inputs\n mul = tf.multiply(v_inputs, v_weights, str(neuron_id))\n # sum multiplied values\n sum = tf.reduce_sum(mul, name='sum_' + str(neuron_id))\n # apply activation function\n if self.is_output_neuron(neuron_id):\n activation = tf.sigmoid(sum, name=\"output\")\n else:\n activation = tf.nn.leaky_relu(sum, alpha=0.2, name=\"relu_\" + str(neuron_id))\n\n operations[neuron_id] = activation\n if self.is_output_neuron(neuron_id):\n self.output = activation\n return self.graph, self.inputs, self.output",
"def convertGraph(G):\n G_new = nx.Graph()\n for nd, attrs in G.nodes(data=True):\n G_new.add_node(str(nd), chem=attrs['atom_symbol'])\n# G_new.add_node(str(nd), x=str(attrs['attributes'][0]),\n# y=str(attrs['attributes'][1]))\n for nd1, nd2, attrs in G.edges(data=True):\n G_new.add_edge(str(nd1), str(nd2), valence=attrs['bond_type'])\n# G_new.add_edge(str(nd1), str(nd2))\n\n return G_new",
"def create_graph_network(start_node, connections):\n graph = nx.Graph()\n graph.add_node(start_node)\n print(connections.index)\n graph.add_nodes_from(connections.index)\n edge_list = list(zip(itertools.repeat(start_node), connections.index))\n print(\"edge list is \", edge_list)\n graph.add_edges_from(edge_list)\n for i in graph.edges():\n graph[i[0]][i[1]]['weight'] = connections.loc[i[1]]['count']\n # graph[i[0]][i[1]]['proposal_number'] = connections.loc[i[1]]['proposal_number']\n # graph[i[0]][i[1]]['institution'] = connections.loc[i[1]]['institution']\n # graph[i[0]][i[1]]['proposal_title'] = connections.loc[i[1]]['proposal_title']\n # graph[i[0]][i[1]]['project_status'] = connections.loc[i[1]]['project_status']\n\n # Adding random position data to the graph.\n # pos = nx.spring_layout(graph, k=1)\n pos = nx.circular_layout(graph)\n nx.set_node_attributes(graph, 'pos', pos)\n return graph",
"def _make_graph(nodes, ways):\n graph = networkx.MultiDiGraph(crs=\"EPSG:4326\")\n ways_proj = ways.set_crs(\"EPSG:4326\").to_crs(\"EPSG:3395\")\n\n for node_id, node_attr in nodes.rename(columns={'longitude': 'x', 'latitude': 'y'}).iterrows():\n graph.add_node(node_id, **node_attr)\n\n for _, way in ways_proj.iterrows():\n\n osm_oneway_values = [\"yes\", \"true\", \"1\", \"-1\", \"T\", \"F\"]\n if \"oneway\" in way and way.oneway in osm_oneway_values:\n if way[\"oneway\"] == \"-1\" or way[\"oneway\"] == \"T\":\n # paths with a one-way value of -1 or T are one-way, but in the\n # reverse direction of the nodes' order, see osm documentation\n path_nodes = list(reversed(way.nodes))\n else:\n path_nodes = way.nodes\n # add this path (in only one direction) to the graph\n one_way = True\n\n elif \"junction\" in way and way.junction == \"roundabout\":\n # roundabout are also oneway but not tagged as is\n path_nodes = way.nodes\n one_way = True\n\n # else, this path is not tagged as one-way or it is a walking network\n # (you can walk both directions on a one-way street)\n else:\n # add this path (in both directions) to the graph and set its\n # 'oneway' attribute to False. if this is a walking network, this\n # may very well be a one-way street (as cars/bikes go), but in a\n # walking-only network it is a bi-directional edge\n path_nodes = way.nodes\n one_way = False\n\n # zip together the path nodes so you get tuples like (0,1), (1,2), (2,3)\n # and so on\n path_edges = list(zip(path_nodes[:-1], path_nodes[1:]))\n graph.add_edges_from(path_edges, **way[['id']])\n if not one_way:\n path_edges_reverse = [(v, u) for u, v in path_edges]\n graph.add_edges_from(path_edges_reverse, **way[['id']])\n\n graph = osmnx.utils_graph.add_edge_lengths(graph)\n return graph",
"def to_networkx(self):\n import networkx as nx\n G = nx.DiGraph()\n # G.graph.update(self.config)\n\n if nx.__version__.startswith('1'):\n node_dict = G.node\n else:\n node_dict = G.nodes\n\n def _defaultstyle(node, color, shape='none', **kwargs):\n node_dict[node]['fillcolor'] = color\n node_dict[node]['style'] = 'filled'\n node_dict[node]['shape'] = shape\n node_dict[node].update(kwargs)\n # node_dict[node]['color'] = color\n\n # Add all processes\n # Make inputs and outputs nodes to prevent needing a multigraph\n for proc in self.procs.values():\n G.add_node(proc.name)\n _defaultstyle(proc.name, 'turquoise', shape='ellipse', fontsize=20)\n node_dict[proc.name]['label'] = '{}:\\n{}'.format(proc.name,\n proc.type)\n\n for iport in proc.iports.values():\n iport_name = iport.absname()\n G.add_node(iport_name)\n G.add_edge(iport_name, proc.name)\n node_dict[iport_name]['label'] = iport.name\n _defaultstyle(iport_name, '#fefefe', fontsize=14)\n\n for oport in proc.oports.values():\n oport_name = oport.absname()\n G.add_node(oport_name)\n G.add_edge(proc.name, oport_name)\n node_dict[oport_name]['label'] = oport.name\n _defaultstyle(oport_name, '#f0f0f0', fontsize=14)\n\n # Add all connections\n for proc in self.procs.values():\n for iport in proc.iports.values():\n iport_name = iport.absname()\n for oport in iport.connections:\n if oport is not None:\n oport_name = oport.absname()\n G.add_edge(oport_name, iport_name)\n return G",
"def _construct_graph(self):\n raise NotImplementedError",
"def build_graph(friends: list, mutuals: dict) -> nx.classes.graph.Graph:\n friends_ids = [friend['id'] for friend in friends]\n G = nx.Graph()\n G.add_nodes_from(range(len(friends_ids)))\n\n for idx in tqdm(friends_ids):\n node_id = friends_ids.index(idx)\n G.nodes[node_id]['vk_id'] = idx\n G.nodes[node_id]['first_name'] = friends[node_id]['first_name']\n G.nodes[node_id]['last_name'] = friends[node_id]['last_name']\n G.nodes[node_id]['gender'] = friends[node_id]['sex']\n G.nodes[node_id]['relation'] = friends[node_id].get('relation')\n G.nodes[node_id]['city'] = friends[node_id].get('city', {}).get('title')\n G.nodes[node_id]['country'] = friends[node_id].get('country', {}).get('title')\n G.nodes[node_id]['schools'] = friends[node_id].get('schools')\n G.nodes[node_id]['universities'] = friends[node_id].get('universities')\n G.nodes[node_id]['career'] = friends[node_id].get('career')\n idx_mutuals = mutuals.get(idx)\n if idx_mutuals != None:\n edges = [(node_id, friends_ids.index(friend_id)) for friend_id in idx_mutuals]\n G.add_edges_from(edges)\n\n return G",
"def _build_dependency_graph(self):\n\n #\n # Find the binary roles\n #\n nodes, roles = self._find_roles()\n\n #\n # Build the graph\n #\n working_list = list(set(nodes.keys()))\n\n setters = [b for b, r in roles.items() if Role.SETTER in r or Role.SETTER_GETTER in r]\n\n while working_list:\n b = working_list[0]\n working_list = working_list[1:]\n\n if nodes[b] not in self._graph:\n self._graph[nodes[b]] = []\n\n # it's a root node\n if Role.GETTER not in roles[b] and Role.SETTER_GETTER not in roles[b]:\n nodes[b].set_root()\n\n # takes params from some other binary\n else:\n is_orphan = True\n for setter in setters:\n setter_strings_set = set(nodes[setter].role_strings)\n node_strings_set = set(nodes[b].role_strings)\n if setter_strings_set.intersection(node_strings_set):\n if nodes[setter] not in self._graph:\n self._graph[nodes[setter]] = []\n self._graph[nodes[setter]].append(nodes[b])\n is_orphan = False\n\n # mark orphans\n if is_orphan:\n nodes[b].set_orphan()\n\n # Clean up\n for k, childs in self._graph.iteritems():\n self._graph[k] = list(set(childs))\n\n # set leaves\n for k, c in self._graph.iteritems():\n if not c:\n k.set_leaf()\n\n # post processing:\n # remove those nodes that are not orphans\n # and are not network parsers\n\n nodes = self.nodes\n children = [c for x in self._graph.values() for c in x if x]\n leafs_non_orphan = [n for n in nodes if n.leaf and not n.orphan]\n seed_names = [x.split('/')[-1] for x in self._seed_bins]\n spurious_nodes = [n for n in leafs_non_orphan if n not in children and n.bin.split('/')[-1] not in seed_names]\n for to_rem in spurious_nodes:\n del self._graph[to_rem]",
"def build_graph(self, nodes, edges):\n\n log.info(\"Building Graph with [%s] nodes and [%s] edges\" % ('NOT_IMPLEMENTED', 'NOT_IMPLEMENTED'))\n\n for node, node_type in nodes.items():\n self.graph.node(node, node.replace('tmp_git_repo/', '', 1), color=NODE_COLORS[node_type])\n\n for left_edge, right_edges in edges.items():\n for right_edge in right_edges:\n self.graph.edge(left_edge, right_edge.import_path, label=right_edge.import_name)",
"def generate_graph(self):\n\t\tif self.joins == None:\n\t\t\tself.get_joins()\n\t\tprint('generating Networkx DiGraph object of {database} from query results'.format(**self.__dict__))\n\t\t# save distinct Child column values\n\t\tchilds = set([j.Child for j in self.joins])\n\t\t# save distinct Parent column values\n\t\tparents = set([j.Parent for j in self.joins])\n\t\t# save names of Leaf tables\n\t\tleafs = list(childs - parents)\n\t\tself._traverse_joins(leafs)"
] | [
"0.81066954",
"0.6750233",
"0.6639164",
"0.66322136",
"0.6630946",
"0.6617218",
"0.6586329",
"0.65858966",
"0.65559703",
"0.6498455",
"0.64820826",
"0.6480336",
"0.6440278",
"0.64248806",
"0.6404701",
"0.6381441",
"0.63398075",
"0.6318121",
"0.62991834",
"0.6286606",
"0.62709624",
"0.6231524",
"0.6231379",
"0.61804515",
"0.61755943",
"0.61700547",
"0.6168517",
"0.6159621",
"0.61529714",
"0.6134124"
] | 0.73165965 | 1 |
Display the variables and relations as a graph, using networkx and matplotlib. | def display_graph(variables, relations):
graph = as_networkx_graph(variables, relations)
# Do not crash if matplotlib is not installed
try:
import matplotlib.pyplot as plt
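        # No positions are supplied, so draw_networkx falls back to its default spring layout;
        # alternative layouts are left commented out below.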
nx.draw_networkx(graph, with_labels=True)
# nx.draw_random(graph)
# nx.draw_circular(graph)
# nx.draw_spectral(graph)
plt.show()
except ImportError:
print("ERROR: cannot display graph, matplotlib is not installed") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def visualize_dependency_graph(self):\n if self.graph is None:\n self.logger.error(\"Graph value none cannot be plotted\")\n return\n\n nx.draw(self.graph, cmap=plt.get_cmap('jet'), with_labels=True)\n plt.show()",
"def plot_graph(self) -> None:",
"def display_bipartite_graph(variables, relations):\n graph = as_networkx_bipartite_graph(variables, relations)\n\n # Do not crash if matplotlib is not installed\n try:\n import matplotlib.pyplot as plt\n\n pos = nx.drawing.spring_layout(graph)\n variables = set(n for n, d in graph.nodes(data=True) if d[\"bipartite\"] == 0)\n factors = set(graph) - variables\n nx.draw_networkx_nodes(\n graph,\n pos=pos,\n with_labels=True,\n nodelist=variables,\n node_shape=\"o\",\n node_color=\"b\",\n label=\"variables\",\n alpha=0.5,\n )\n nx.draw_networkx_nodes(\n graph,\n pos=pos,\n with_labels=True,\n nodelist=factors,\n node_shape=\"s\",\n node_color=\"r\",\n label=\"factors\",\n alpha=0.5,\n )\n nx.draw_networkx_labels(graph, pos=pos)\n nx.draw_networkx_edges(graph, pos=pos)\n # nx.draw_random(graph)\n # nx.draw_circular(graph)\n # nx.draw_spectral(graph)\n plt.show()\n except ImportError:\n print(\"ERROR: cannot display graph, matplotlib is not installed\")",
"def show_graph(g):\r\n net.draw(g,with_labels= True,font_size=16)\r\n plt.show()",
"def visualise(self) -> None:\n nx_graph = nx.DiGraph()\n\n for v in self._vertices:\n if not v.predicate:\n name = v.name.split(\"/\")[-1]\n nx_graph.add_node(name, name=name, pred=v.predicate)\n\n for v in self._vertices:\n if not v.predicate:\n v_name = v.name.split(\"/\")[-1]\n # Neighbors are predicates\n for pred in self.get_neighbors(v):\n pred_name = pred.name.split(\"/\")[-1]\n for obj in self.get_neighbors(pred):\n obj_name = obj.name.split(\"/\")[-1]\n nx_graph.add_edge(v_name, obj_name, name=pred_name)\n\n plt.figure(figsize=(10, 10))\n _pos = nx.circular_layout(nx_graph)\n nx.draw_networkx_nodes(nx_graph, pos=_pos)\n nx.draw_networkx_edges(nx_graph, pos=_pos)\n nx.draw_networkx_labels(nx_graph, pos=_pos)\n names = nx.get_edge_attributes(nx_graph, \"name\")\n nx.draw_networkx_edge_labels(nx_graph, pos=_pos, edge_labels=names)",
"def plot_graph(self):\n g = self.get_graph()\n plt.title(\"Our graph:\" + g.__str__())\n plt.xlabel(\"X\")\n plt.ylabel(\"-<\") # I should flip 'Y' letter so I decided to write it by a tricky way. :)\n for src, node in g.get_all_v().items():\n # Print the node point\n if node.location is None:\n pos = self.get_random_location() # get a elegant location\n node.location = GeoLocation(pos)\n plt.plot(node.location.x, node.location.y, marker='o', markerfacecolor='red', markersize=3, color='yellow')\n plt.text(node.location.x, node.location.y, str(node.key))\n # Print the edge line\n for dest in g.all_out_edges_of_node(src).keys():\n x1 = g.get_all_v()[src].location.x\n y1 = g.get_all_v()[src].location.y\n if g.get_all_v()[dest].location is None:\n pos = self.get_random_location()\n g.get_all_v()[dest].location = GeoLocation(pos)\n g.get_all_v()[dest].location = GeoLocation(pos)\n x2 = g.get_all_v()[dest].location.x\n y2 = g.get_all_v()[dest].location.y\n plt.arrow(x1, y1, x2 - x1, y2 - y1, width=0.00001, linewidth=0.05)\n plt.show()",
"def plot_graph(self) -> None:\n\n nodes_on_graph = self.dw_graph.get_all_v()\n for k, v in nodes_on_graph.items():\n if v.position is None:\n x_rand = random.uniform(0.5, self.dw_graph.v_size())\n y_rand = random.uniform(0.5, self.dw_graph.v_size())\n v.position = (x_rand, y_rand)\n x_vals = []\n y_vals = []\n n = list(nodes_on_graph.keys())\n for k, v in nodes_on_graph.items(): # draw nodes\n x_vals.append(v.position[0])\n y_vals.append(v.position[1])\n\n fig, ax = plt.subplots()\n plt.plot(x_vals, y_vals, 'ro', markersize=5, data=\"d\")\n\n for p, txt in enumerate(n):\n ax.annotate(n[p], (x_vals[p]+0.00003, y_vals[p]), color='g')\n\n for n in nodes_on_graph:\n n1 = self.dw_graph.get_nodes(n)\n x = n1.position[0]\n y = n1.position[1]\n for r in self.dw_graph.all_out_edges_of_node(n):\n dx = self.dw_graph.get_nodes(r).position[0]\n dy = self.dw_graph.get_nodes(r).position[1]\n ax.quiver(x, y, dx-x, dy-y, angles='xy', scale_units='xy', scale=1)\n #plt.arrow(x, y, dx - x, dy - y, head_width=0.0009, width=0.00005, length_includes_head=True)\n\n\n plt.xlabel(\"x axis \")\n plt.ylabel(\"y axis \")\n plt.title(\"The title of the graph\")\n plt.show()",
"def network_graph(net_dict=None):\n if net_dict == None:\n net_dict = {}\n else:\n G = nx.from_dict_of_lists(net_dict)\n plt.figure(num=None, figsize=(30, 30), dpi=80, facecolor='w', edgecolor='c')\n nx.draw_networkx(G, with_labels=True, alpha=0.5, edge_color='c', cmap=plt.cm.GnBu)\n plt.savefig(\"metabolism_5years.png\", bbox_inches='tight')",
"def showGraph(self, file_name = \"\"):\n \n # prepare edges and weights for visualization\n edges = self.graph.edges()\n weights = [self.graph_data[u]['pheromones'][v] for u,v in edges]\n weights_sum = sum(weights)\n weights = [ (w/weights_sum)*50 for w in weights]\n \n # prepare different shades of red to be used to optionally differentiate\n # between edges with different costs\n # to show more informatiion on the same graph\n colors = []\n max_cost = max([self.graph_data[u]['costs'][v] for u,v in edges])\n for u,v in edges:\n if self.graph_data[u]['costs'][v] <= max_cost/32:\n colors.append('#ff7f7f')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/16:\n colors.append('#ff6666')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/8:\n colors.append('#ff4c4c')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/4:\n colors.append('#ff3232')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost/2:\n colors.append('#ff1919')\n continue\n if self.graph_data[u]['costs'][v] <= max_cost:\n colors.append('#ff0000')\n continue\n \n # print the graph \n pos=nx.circular_layout(self.graph)\n nx.draw( self.graph,pos=pos,node_size=200,node_color='#A8A8A8', with_labels=True,edges=edges, edge_color=colors,edge_cmap=plt.cm.Blues, width=weights)\n if file_name != \"\":\n path = \"img/\"+file_name\n plt.savefig(path, format=\"PNG\")\n plt.show()",
"def visualize_graph(edges_lst):\n G = nx.Graph()\n for edge in edges_lst:\n start = edge[0]\n end = edge[1]\n weight = edge[2]\n G.add_edge(start, end, weight=weight)\n pos = nx.planar_layout(G)\n nx.draw_networkx(G, pos)\n labels = nx.get_edge_attributes(G, 'weight')\n nx.draw_networkx_edge_labels(G, pos, edge_labels=labels)\n plt.show()",
"def showGraph(G, mate, label=\"\"):\r\n \r\n # Set the positions for all nodes and the figure size\r\n plt.close('all')\r\n plt.figure( figsize=(10, 10) )\r\n pos = nx.graphviz_layout(G, prog='sfdp', args='')\r\n \r\n # Draw the graph with node labels and a title\r\n plt.title(label)\r\n nx.draw(G, pos, node_size=400, with_labels=True)\r\n \r\n # Draw the matched edges\r\n nx.draw_networkx_edges(G, pos, edgelist=mate.items(),\r\n width=5, alpha=0.4, edge_color='b')\r\n \r\n plt.axis('off')\r\n plt.show()",
"def graphviz_prettify(self, network):\n graph_settings = {\n 'rankdir': 'LR',\n 'dpi': 60,\n }\n network.graph.update(graph_settings)\n\n for n in network.nodes():\n if isinstance(n, Variable):\n network.nodes[n]['label'] = n.name\n elif isinstance(n, Equation):\n network.nodes[n]['shape'] = 'diamond'",
"def draw(self):\n\t\tnx_graph = self.parse_graph()\n\t\tpos = nx.spring_layout(nx_graph, k=0.15, iterations=20) # to spread out the nodes\n\n\t\tnx.draw(nx_graph, pos, edge_color=\"black\", width=1, linewidths=1, node_size=500, node_color=\"pink\", alpha=0.9, with_labels=True)\n\n\t\tedge_labels = {(edge[0], edge[1]):edge[2] for edge in self.edges}\n\t\tnx.draw_networkx_edge_labels(nx_graph, pos, edge_labels=edge_labels, font_color='red')\n\n\t\tplt.show()",
"def disp_graph(graph, output_filename):\n dot = Graph(name=\"Graph\", format=\"png\") # instantiate a graph object\n for node in graph.keys(): # add nodes to the graph\n dot.node(str(node))\n for node in graph.keys(): # for every node in the input graph\n # for every other node in the input graph that the first node is connected to\n for other_node in graph[node].keys():\n dot.edge(str(node), str(other_node)) # create the edge\n dot.render(output_filename, view=True) # visualize the graph and save it",
"def plot_graph(G):\r\n pos = nx.random_layout(G)\r\n nx.draw(G, pos)\r\n edge_labels = dict([((u, v, ), d['label']) for u, v, d in\r\n G.edges(data=True)])\r\n nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)\r\n nx.draw_networkx_labels(G, pos, labels={i:i for i in G.nodes()},\r\n font_size=16)\r\n plt.show()",
"def plotGraph(self, title = \"Multi Layer Perceptron (MLP)\"):\n graph, pos, colorMap = self.getGraph()\n\n fig = plt.figure()\n fig.canvas.set_window_title(\"Neural Network\")\n plt.plot()\n nx.draw_networkx_nodes(graph,pos, node_color = colorMap)\n nx.draw_networkx_edges(graph,pos)\n plt.axis('off')\n plt.title(title)\n #plt.savefig(\"autoencoder.svg\", transparent = True)\n plt.show()",
"def plot_graph(self, input_graph, NX_GRAPHS):\n self.dgl_graph = input_graph\n self.NX_GRAPHS = NX_GRAPHS\n \n self.get_nodes()\n color_monomer = self.get_colors()\n \n print(dict(zip(range(len(self.nodes_list)), self.nodes_list)))\n print('Key Monomer is', self.nodes_list[np.argmax(self.node_weights)])\n \n fig, ax = plt.subplots()\n nx.draw_networkx(\n dgl.to_networkx(self.dgl_graph),\n arrows=False,\n node_size = 300*10**self.node_weights,\n node_color = [color_monomer[node] for node in self.nodes_list],\n font_size = 18,\n font_color = 'w',\n font_weight = 'bold',)\n\n plt.axis('off')\n ax.set_xlim([1.2*x for x in ax.get_xlim()])\n ax.set_ylim([1.2*y for y in ax.get_ylim()])\n plt.show()",
"def fullgraphplot(time_lower,time_upper):\n\n edges_list,node_list,title_list = graphformation(time_lower,time_upper)\n node_size = []\n for i in range(len(node_list)):\n node_size.append(5)\n g = Network(\n height=\"750px\",\n width=\"100%\",\n bgcolor=\"#222222\",\n font_color=\"white\")\n g.add_nodes(node_list,label=node_list,title=title_list, size= node_size)\n g.add_edges(edges_list)\n g.show(\"nx.html\")\n return",
"def plot_graph(self):\n plt.axis(\"off\")\n pos = nx.kamada_kawai_layout(self.graph)\n return nx.draw_networkx(self.graph, pos=pos, node_size=400)",
"def draw_graph(self):\r\n G=nx.Graph()\r\n \r\n list_location1 = []\r\n list_location2 = []\r\n list_location3 = []\r\n list_location4 = []\r\n \r\n for citizen in self.citizens:\r\n G.add_node(citizen.id)\r\n if citizen.location == 1:\r\n list_location1.append(citizen.id)\r\n elif citizen.location == 2:\r\n list_location2.append(citizen.id)\r\n elif citizen.location == 3:\r\n list_location3.append(citizen.id)\r\n else: \r\n list_location4.append(citizen.id)\r\n\r\n for citizen in self.citizens:\r\n for friend in citizen.friends:\r\n G.add_edge(citizen.id,friend.id)\r\n\r\n pos = nx.random_layout(G)\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location1, node_color='r')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location2, node_color='g')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location3, node_color='b')\r\n nx.draw_networkx_nodes(G,pos,node_size=60,nodelist=list_location4, node_color='y')\r\n nx.draw_networkx_edges(G,pos, width=1)\r\n\r\n plt.show()",
"def drawGraph(A):\n m,n = A.shape\n labels = {}\n for i in range(n):\n labels[i]=str(i)\n gr = nx.from_numpy_matrix(A.T,create_using=nx.DiGraph())\n nx.draw(gr,arrows=True,node_color='#15b01a',labels=labels)\n plt.show()",
"def plot_network(genome):\n g = genome.n\n # width = g.graph[\"size\"]\n # height = g.graph[\"size\"]\n\n # fig = plt.figure(figsize=(width,height))\n fig = plt.figure()\n fig.patch.set_facecolor('white')\n ax = fig.add_subplot(111, aspect='equal')\n # ax.set_axis_off()\n\n # collision_coords = find_collisions(genome)\n # das_coords = find_das_extended(genome)\n # slp_coords = find_slp(genome)\n slp_nodes = find_attacker_path(genome.n)\n\n # Plot the parent-child tree\n for n in g.nodes_iter():\n if g.node[n][\"parent\"] is not None:\n _line(g.node[n][\"coord\"], g.node[g.node[n][\"parent\"]][\"coord\"], zorder=0, color='k')\n\n for n in g.nodes_iter():\n coord = g.node[n][\"coord\"]\n shape = _circles\n colour = 'b'\n s = 0.4\n if n in slp_nodes:\n shape = _hexagons\n colour = 'y'\n s = 0.45\n if n == g.graph[\"source\"]:\n shape = _squares\n colour = 'g'\n if n == g.graph[\"sink\"]:\n shape = _octogons\n colour = 'k'\n s = 0.45\n shape(coord[0], coord[1], s, fc=\"white\", ec=colour)\n if(len(str(g.node[n][\"slot\"])) == 1):\n ax.text(coord[0]-0.15, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 2):\n ax.text(coord[0]-0.25, coord[1]+0.15, str(g.node[n][\"slot\"]))\n elif(len(str(g.node[n][\"slot\"])) == 3):\n ax.text(coord[0]-0.4, coord[1]+0.15, str(g.node[n][\"slot\"]))\n else:\n ax.text(coord[0]-0.5, coord[1]+0.15, str(g.node[n][\"slot\"]))\n\n\n plt.gca().invert_yaxis()\n fig.show()",
"def _build_graph(show=False):\n global G\n G = nx.Graph()\n node_labels, edge_labels = {}, {}\n for idx, dep in enumerate(A.deps):\n\n types = [\"dependent\", \"governor\"]\n\n # nodes, labels\n for x in types:\n G.add_node(str(dep[x]), word=dep[x + \"Gloss\"], pos=A.lookup[dep[x]][\"pos\"])\n node_labels[str(dep[x])] = dep[x + \"Gloss\"] + \" : \" + A.lookup[dep[x]][\"pos\"]\n\n # edges, labels\n G.add_edge(str(dep[types[0]]), str(dep[types[1]]), dep=dep[\"dep\"])\n edge_labels[(str(dep[types[0]]), str(dep[types[1]]))] = dep[\"dep\"]\n\n if show == True:\n pos = nx.spring_layout(G)\n nx.draw_networkx(G, pos=pos, labels=node_labels, node_color=\"white\", alpha=.5)\n nx.draw_networkx_edge_labels(G, pos=pos, edge_labels=edge_labels)\n plt.show()",
"def visualize(self, A):\n G = nx.from_numpy_matrix(np.array(A))\n nx.draw(G, with_labels=True)\n plt.show()\n plt.clf()\n exit(0)",
"def plot(model, pos=None, scale=1, figsize=(15, 8), verbose=3):\n out = {}\n G = nx.DiGraph() # Directed graph\n layout='fruchterman_reingold'\n\n # Extract model if in dict\n if 'dict' in str(type(model)):\n model = model.get('model', None)\n\n # Bayesian model\n if 'BayesianModel' in str(type(model)) or 'pgmpy' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on BayesianModel')\n # positions for all nodes\n pos = network.graphlayout(model, pos=pos, scale=scale, layout=layout, verbose=verbose)\n # Add directed edge with weigth\n # edges=model.edges()\n edges=[*model.edges()]\n for i in range(len(edges)):\n G.add_edge(edges[i][0], edges[i][1], weight=1, color='k')\n elif 'networkx' in str(type(model)):\n if verbose>=3: print('[bnlearn] >Plot based on networkx model')\n G = model\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n else:\n if verbose>=3: print('[bnlearn] >Plot based on adjacency matrix')\n G = network.adjmat2graph(model)\n # Get positions\n pos = network.graphlayout(G, pos=pos, scale=scale, layout=layout, verbose=verbose)\n\n # Bootup figure\n plt.figure(figsize=figsize)\n # nodes\n nx.draw_networkx_nodes(G, pos, node_size=500, alpha=0.85)\n # edges\n colors = [G[u][v].get('color', 'k') for u, v in G.edges()]\n weights = [G[u][v].get('weight', 1) for u, v in G.edges()]\n nx.draw_networkx_edges(G, pos, arrowstyle='->', edge_color=colors, width=weights)\n # Labels\n nx.draw_networkx_labels(G, pos, font_size=20, font_family='sans-serif')\n # Get labels of weights\n # labels = nx.get_edge_attributes(G,'weight')\n # Plot weights\n nx.draw_networkx_edge_labels(G, pos, edge_labels=nx.get_edge_attributes(G, 'weight'))\n # Making figure nice\n ax = plt.gca()\n ax.set_axis_off()\n plt.show()\n\n # Store\n out['pos']=pos\n out['G']=G\n return(out)",
"def show_graph(self):\n print(f'|V| = {self.V}, |E| = {self.E}')\n for n in range(1, self.V+1):\n print(f'[{n}] -> {self.adjacency_list[n]}')",
"def plot_dag(\n self,\n filename,\n traverser,\n node_size=500,\n label_font_size=12,\n text_angle=0,\n image_width=16,\n image_height=12,\n ):\n # map nodes to a color for their operation type\n # https://stackoverflow.com/questions/27030473/how-to-set-colors-for-nodes-in-networkx-python\n color_map = []\n colors = [\"#fbb4ae\", \"#b3cde3\", \"#ccebc5\", \"#decbe4\", \"#fed9a6\"]\n for node in self.G2:\n if self.node_map[node] == OperationType.reader.value:\n color_map.append(colors[0])\n elif self.node_map[node] == OperationType.pipeline.value:\n color_map.append(colors[1])\n elif self.node_map[node] == OperationType.model.value:\n color_map.append(colors[2])\n elif self.node_map[node] == OperationType.writer.value:\n color_map.append(colors[3])\n else:\n color_map.append(colors[4])\n\n fig = plt.figure(figsize=(image_width, image_height))\n ax = plt.subplot(111)\n ax.set_title(filename, fontsize=10)\n\n try:\n import pydot\n from networkx.drawing.nx_pydot import graphviz_layout\n except ImportError: # pragma: no cover\n raise ImportError(\n \"This example needs Graphviz and pydot.\"\n \"Please refer to the Plotting requirements in the README\"\n )\n\n # pos = nx.spring_layout(G)\n # pos = nx.circular_layout(G)\n # pos = nx.kamada_kawai_layout(G)\n # pos = nx.shell_layout(G)\n # pos = nx.spectral_layout(G)\n pos = graphviz_layout(self.G2, prog=\"dot\") # , prog='twopi', args='')\n\n nx.draw(\n self.G2,\n pos,\n node_size=node_size,\n node_color=color_map,\n edge_color=\"#939393\",\n font_size=8,\n font_weight=\"bold\",\n )\n # nx.draw_networkx_nodes(G, pos, node_color='b', node_size=500, alpha=0.8)\n\n if len(self.conditional_nodes) > 0:\n cnodes = nx.draw_networkx_nodes(\n self.G2,\n pos,\n node_color=\"#e6b655\",\n node_size=1.5 * node_size,\n alpha=0.8,\n node_shape=\"D\",\n nodelist=list(self.conditional_nodes),\n )\n cnodes.set_edgecolor(\"red\")\n\n # nx.draw_networkx_labels(self.G2,pos, font_size=9)\n\n text = nx.draw_networkx_labels(\n self.G2, pos, font_size=label_font_size\n )\n\n if traverser:\n # map node name to sequence number\n sequence = traverser.traversal_list()\n idx = list(range(1, len(sequence) + 1))\n d = dict(zip(sequence, idx))\n\n # let's plot the sequence numner above the node. How far above it?\n ys = [t._y for _, t in text.items()]\n ysrange = max(ys) - min(ys)\n offset = 0.02 * abs(ysrange)\n\n for _, t in text.items():\n t.set_rotation(text_angle)\n\n if traverser:\n plt.text(t._x, t._y + offset, d[t._text], fontsize=24, color=\"red\")\n\n plt.axis(\"off\")\n plt.tight_layout()\n plt.savefig(filename, format=\"PNG\")\n logging.info(\"Graph written to %s\" % filename)",
"def as_networkx_graph(variables, relations):\n graph = nx.Graph()\n\n # One node for each variables\n graph.add_nodes_from([v.name for v in variables])\n\n for r in relations:\n for p in all_pairs([e.name for e in r.dimensions]):\n graph.add_edge(*p)\n return graph",
"def draw(self):\n nx.draw_networkx(self.rc)",
"def plot_graph(self, graph, subplot=False, axes=None):\n if subplot:\n plt.sca(axes[1, 1])\n axes[1, 1].axis('off')\n else:\n plt.figure(figsize=(5, 5))\n if len(graph.nodes) == 4:\n pos = {(0, 0): [0, 1], (0, 1): [1, 1], (1, 0): [0, 0], (1, 1): [1, 0]}\n else:\n pos = nx.circular_layout(graph)\n nx.draw_networkx_nodes(\n graph, pos, node_size=1800, node_color='w', edgecolors='k')\n nx.draw_networkx_edges(\n graph,\n pos,\n node_size=1800,\n edge_color='k',\n arrowstyle='->',\n arrowsize=10,\n width=3)\n nx.draw_networkx_labels(self.G, pos, {x: x for x in self.V}, font_size=14)"
] | [
"0.75382435",
"0.7278241",
"0.7211741",
"0.71995574",
"0.70474607",
"0.69660336",
"0.6962253",
"0.68822443",
"0.6876223",
"0.68665344",
"0.685607",
"0.6749594",
"0.67388505",
"0.67268574",
"0.6711935",
"0.6700764",
"0.6693562",
"0.6683998",
"0.6630944",
"0.6603868",
"0.6555654",
"0.65506417",
"0.6545641",
"0.6501951",
"0.6494064",
"0.6486673",
"0.6464168",
"0.64488745",
"0.6447923",
"0.64304465"
] | 0.8590499 | 0 |
Compute the graph diameter(s). If the graph contains several independent subgraphs, return a list with the diameter of each subgraph. | def graph_diameter(variables, relations):
diams = []
g = as_networkx_graph(variables, relations)
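    # nx.diameter is only defined for connected graphs, so compute it separately
    # for each connected component.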
components = (g.subgraph(c).copy() for c in nx.connected_components(g))
for c in components:
diams.append(nx.diameter(c))
return diams | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def diameter(self):\n\n v = self.vertices()\n pairs = [ (v[i],v[j]) for i in range(len(v)-1) for j in range(i+1, len(v))]\n smallest_paths = []\n for (s,e) in pairs:\n paths = self.find_all_path(s,e)\n smallest = sorted(paths, key=len)[0]\n smallest_paths.append(smallest)\n\n smallest_paths.sort(key=len)\n\n # Print the list smallest_paths\n\n # Longest path is at the end of list\n # ie diameter corresponds to length of this path\n\n diameter = len(smallest_paths[-1]) -1\n return diameter",
"def find_diameter(self):\n all_ways = []\n for vertex1 in self.graph.keys():\n for vertex2 in self.graph.keys():\n if vertex2 != vertex1:\n result = self.pathFinder(vertex1, vertex2)\n for path in result:\n all_ways.append(len(path) - 1)\n self.diameter = max(all_ways)\n print(f\"Diameter of network is {self.diameter}\")",
"def topo_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n distance.append(len(pathlist[k]) - 1)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.topodiameter = Temp",
"def diameter(graph):\r\n max_distance = 0\r\n for vertex in graph:\r\n new_dist = max_dist(graph, vertex)\r\n if new_dist > max_distance:\r\n max_distance = new_dist\r\n return max_distance",
"def spatial_diameter(self):\n import math\n \n Temp = 0\n for i in range(self.nodenum):\n for j in range(self.nodenum):\n pathlist = []\n self.pathij(i, j, pathlist)\n distance = []\n \n for k in range(len(pathlist)):\n Temp2 = 0\n for m in range(len(pathlist[k]) - 1):\n Temp2 += self.Dismatrix[pathlist[k][m], pathlist[k][m+1]]\n distance.append(Temp2)\n \n if(len(distance) == 0):\n continue\n else:\n if(min(distance) >= Temp):\n Temp = min(distance)\n \n self.diameter = Temp",
"def diameter(self):\n return self._diameter",
"def diameter(self):\n return self._diameter",
"def undirected_diameter(self) -> int:\n return nx.diameter(self.to_undirected())",
"def num_of_subgraphs(self):\n \n G = self.to_undirected_graph()\n \n count = G.num_subgraph()\n \n print('The number of disconnected components in the graph is ', count)",
"def get_pupil_diameter(dlc):\r\n diameters = []\r\n # Get the x,y coordinates of the four pupil points\r\n top, bottom, left, right = [np.vstack((dlc[f'pupil_{point}_r_x'], dlc[f'pupil_{point}_r_y']))\r\n for point in ['top', 'bottom', 'left', 'right']]\r\n # First compute direct diameters\r\n diameters.append(np.linalg.norm(top - bottom, axis=0))\r\n diameters.append(np.linalg.norm(left - right, axis=0))\r\n\r\n # For non-crossing edges, estimate diameter via circle assumption\r\n for pair in [(top, left), (top, right), (bottom, left), (bottom, right)]:\r\n diameters.append(np.linalg.norm(pair[0] - pair[1], axis=0) * 2 ** 0.5)\r\n\r\n # Ignore all nan runtime warning\r\n with warnings.catch_warnings():\r\n warnings.simplefilter(\"ignore\", category=RuntimeWarning)\r\n return np.nanmedian(diameters, axis=0)",
"def _compute_dist_cartesian(graph):\r\n for edge in graph.edges():\r\n node1, node2 = edge\r\n dx = np.abs(graph.nodes[node1]['xcoord'] - graph.nodes[node2]['xcoord'])\r\n dy = np.abs(graph.nodes[node1]['ycoord'] - graph.nodes[node2]['ycoord'])\r\n dist = np.round(np.sqrt(np.square(dx) + np.square(dy)), 5)\r\n graph.edges[node1, node2]['length'] = dist",
"def diameter(self):\n return self.radius * 2",
"def diameter(self):\n return self.radius * 2",
"def directed_dfs(digraph, start, end, max_total_dist, max_dist_outdoors):\n\n\n path = [[],0 , 0]\n best_path = get_best_path(digraph, start, end, path, max_dist_outdoors, max_total_dist, best_path = None)\n\n if best_path[0] is None:\n raise ValueError('No work')\n else :\n return best_path[0]",
"def diameter(self):\n return 2 * self.radius",
"def total_edges_per_time_step(graphs: typ.Iterable[vtna.graph.Graph]) -> typ.List[int]:\n return [sum(edge.get_count() for edge in graph.get_edges()) for graph in graphs]",
"def number_of_deviation_edges(self):\n return len(list(self.deviation_edges()))",
"def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)",
"def draw_di_graph(graph_object, scale_by_degree=True):\n positions = nx.spring_layout(graph_object)\n if scale_by_degree:\n d = nx.degree(graph_object)\n keys, degrees = zip(*d)\n network = nx.draw(graph_object, nodelist=keys,\n node_size=[5*degree for degree in degrees],\n pos=positions, alpha=0.5, arrows=False)\n else:\n network = nx.draw(graph_object, pos=positions, node_size=50, alpha=0.5)\n # labels = nx.draw_networkx_labels(graph, pos=positions)\n return positions, network, plt.gca()",
"def Test_Diameter(Graph):\n\n Durchmesser = M_Graph.get_Diameter(Graph)\n KPS = float(sum(Durchmesser)) / float(len(Durchmesser))\n\n return KPS",
"def directedDFS(digraph, start, end, maxTotalDist, maxDistOutdoors, path=[],shortest = None):\n #TODO\n #assumes graph is a Digraph\n #assumes start and end are nodes in graph\n def Dist(path):\n result = 0\n if path == None:\n return result\n if len(path) == 0:\n return result\n for i in range(len(path)-1):\n src = path[i]\n dest = path[i+1]\n for item in digraph.edges[src]:\n if item[0] == dest:\n result += item[1]\n return result \n \n # Helper function to calculate Total Outdoor Distance in a path\n def Out(path):\n result = 0\n if path == None:\n return result \n if len(path) == 0:\n return result\n for i in range(len(path)-1):\n src = path[i]\n dest = path[i+1]\n for item in digraph.edges[src]:\n if item[0] == dest:\n result += item[2]\n return result\n \n# Helper function using DFS method\n def DFS(graph, start, end, maxD, maxO, path = []):\n path = path + [start]\n if start == end:\n return path\n shortest = None\n distShort = maxD\n outShort = maxO\n for node in graph.childrenOf(start):\n if node not in path: #avoid cycles\n newPath = DFS(graph, node, end, maxD, maxO, path)\n distNew = Dist(newPath)\n outNew = Out(newPath)\n if newPath!= None and distNew <= maxD and outNew <= maxO:\n if not shortest or distNew < distShort: #check if shorter than shortest\n shortest = newPath\n distShort = distNew\n outShort = outNew\n return shortest\n\n result = DFS(digraph, start, end, maxTotalDist, maxDistOutdoors)\n if result == None:\n raise ValueError\n else:\n return result",
"def get_diameter(self):\n\n if self.no_dist is False:\n dist = self.distance\n diam = dist * self.ang_size / 60. * np.pi/180. * ct._kpc_over_pc_\n self.diam = diam\n else:\n self.diam = -1 # use -1 to indicate unknown diameter\n\n return self.diam",
"def number_of_direct_deviation_edges(self):\n return len(list(self.direct_deviation_edges()))",
"def get_dependency_graph(self):\n return self.graph",
"def in_degree_distribution (digraph) :\n\n in_degree_dist = dict ()\n in_degrees = compute_in_degrees (digraph)\n\n for node in in_degrees :\n if in_degrees[node] in in_degree_dist :\n in_degree_dist[in_degrees[node]] += 1\n else :\n in_degree_dist[in_degrees[node]] = 1\n\n return in_degree_dist",
"def number_of_indirect_deviation_edges(self):\n return len(list(self.indirect_deviation_edges()))",
"def get_diameter(node):\n if node is None:\n return 0\n else:\n diameter_root = get_max_path(node.left) + get_max_path(node.right) + 1\n #print 'max_path from {} is {}'.format(node.value, diameter_root)\n diameter_left = get_diameter(node.left)\n diameter_right = get_diameter(node.right)\n return max(diameter_left, diameter_right, diameter_root)",
"def density(self):\n raise TypeError(\"The density function is not support on a Multigraph.\")",
"def subgraphs_of_length(self, days=None, periods=None):\n graphs = []\n if days:\n sg_length = datetime.timedelta(days=days)\n else:\n sg_length = periods\n\n start_date = self.min_date\n end_date = start_date + sg_length\n done = False\n while not done:\n if start_date > self.max_date:\n break\n if end_date > self.max_date:\n # end_date = self.max_date\n done = True\n print(start_date, end_date)\n new = self.subgraph_within_dates(start_date, end_date)\n if new.nx_graph.number_of_edges():\n graphs.append(new)\n start_date += sg_length\n end_date += sg_length\n return graphs",
"def graphDSD(database: str) -> int:\n\n db = _database(database)\n\n if db:\n return graph.graphDSD(database)\n\n else:\n return None"
] | [
"0.700528",
"0.64061725",
"0.6378661",
"0.6351375",
"0.6088722",
"0.5843668",
"0.5843668",
"0.5829243",
"0.57851386",
"0.56062824",
"0.55966824",
"0.55673695",
"0.55673695",
"0.55229545",
"0.5520967",
"0.5497087",
"0.54773235",
"0.54561347",
"0.5399723",
"0.5395755",
"0.5389739",
"0.53170437",
"0.5293705",
"0.5268244",
"0.5255574",
"0.5241383",
"0.52383834",
"0.5229378",
"0.52201384",
"0.5211598"
] | 0.7837539 | 0 |
Generate all possible pairs from the given list of elements. | def all_pairs(elements):
if len(elements) < 2:
return []
elif len(elements) == 2:
return [(elements[0], elements[1])]
else:
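        # Pair the first element with every later element, then recurse on the rest of the list.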
new_pairs = []
for elt in elements[1:]:
new_pairs.append((elements[0], elt))
return all_pairs(elements[1:]) + new_pairs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pairs(lst):\r\n\tfor i in range(1, len(lst), 2):\r\n\t\tyield lst[i-1], lst[i]",
"def all_pairs(items, sort=False):\n if sort:\n items = sorted(items)\n for i, ni in enumerate(items):\n for j, nj in enumerate(items):\n if j > i: yield ni, nj",
"def __unordered_pairs(l):\n\n return [(l[i], l[j]) for i in range(len(l) - 1) for j in range(i + 1, len(l))]",
"def list_to_pairs(l):\n return {(l[2*i], l[2*i+1]) for i in range(len(l)/2)}",
"def split_in_pairs(arg: Iterable) -> Iterable[Tuple]:\n # We are using zip_longest with one clever hack:\n # https://docs.python.org/3/library/itertools.html#itertools.zip_longest\n # We create an iterator out of the list and then pass the same iterator to\n # the function two times. Thus the function consumes a different element\n # from the iterator each time and produces the desired result.\n iterator = iter(arg)\n return zip_longest(iterator, iterator)",
"def get_pairs(terms):\n return itertools.combinations(terms, 2)",
"def pairwise(lst):\r\n if not lst: return\r\n\r\n for i in range(len(lst)-1):\r\n yield lst[i], lst[i+1]\r\n yield lst[-1], None",
"def pairs(lst):\n i = iter(lst)\n prev = next(i)\n for item in i:\n yield prev, item\n prev = item",
"def pair_combos(iterable):\n pairs = set()\n for a in iterable:\n for b in iterable:\n pairs.add(a + b)\n return list(pairs)",
"def triplewise(iterable):\n # triplewise('ABCDEFG') -> ABC BCD CDE DEF EFG\n for (a, _), (b, c) in pairwise(pairwise(iterable)):\n yield a, b, c",
"def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)",
"def get_pairs(my_list):\n return [(current, my_list[idx + 1] if - 1 else None) for idx, current in enumerate(my_list) if idx < len(my_list) - 1]",
"def stagger_tuple(elements_list, initial=None):\n res = []\n previous_element = initial\n for element in elements_list:\n if previous_element is not None:\n res.append((previous_element, element))\n previous_element = element\n return res",
"def make_pairs(sequence):\n length = len(sequence)\n return [\n (sequence[i], sequence[i + 1])\n for i in range(length - 1)\n ]",
"def get_all_possible_os_pairings(indices_list):\n pairs = []\n itr = 0\n\n for links in indices_list:\n\n for item in links:\n for i in range(itr,len(links)):\n\n if item == links[i]:\n continue\n else:\n pair = item, links[i]\n pairs.append(pair)\n return pairs",
"def ring_pairs(v):\n for i in range(len(v)):\n a = v[i]\n b = v[ (i+1) % len(v) ]\n yield (a,b)",
"def generating_pairs(self, players_list) -> list[tuple[Player]]:\n apairing_players = []\n already_paired = []\n id_number = 0\n times_number_loop = 0\n breaks_number = 0\n while len(apairing_players) != 4:\n\n times_number_loop += 1\n if id_number == 8:\n id_number = 0\n pair = self.create_pair(players_list, id_number, already_paired)\n if pair is None:\n id_number += 1\n else:\n already_paired.append(pair[0])\n already_paired.append(pair[1])\n apairing_players.append(pair)\n id_number += 1\n if times_number_loop == 50:\n already_paired, apairing_players = self.break_pair(already_paired, apairing_players, breaks_number)\n times_number_loop = 0\n breaks_number += 1\n\n return apairing_players",
"def getallpairs(self, x):\n result = []\n for u in range(len(x) - 1):\n result.extend([x[u] + a for a in x[u+1:]])\n\n return result",
"def generate_pairs(self, _list_d):\n\n length = len(_list_d)\n result_list = {}\n\n for i in range(length):\n for j in xrange(i+1,length):\n l = len(result_list)\n result_list[l] = ((i, _list_d[i]),(j, _list_d[j]))\n\n return result_list",
"def make_tag_pairs(self, input, start, end, elements):\n tps = TagPairs()\n for e in elements:\n k = [k for k in e.keys()][0]\n tps[k] = e[k]\n return tps",
"def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a, a)",
"def get_list_of_all_pairs_lists(input_lst):\n # handle possible case of empty list input\n if len(input_lst) == 0:\n return [[]]\n\n # base case - if list is two items long\n elif len(input_lst) == 2:\n return [[(input_lst[0], input_lst[1])]]\n\n else:\n combos = []\n first_item = input_lst[0] # first item in list\n\n # look at all items after first item - pair each with first item\n for i in range(1, len(input_lst)):\n\n pair = (first_item, input_lst[i])\n\n other_items_list = input_lst[1:i] + input_lst[i + 1 :]\n\n for rest in get_list_of_all_pairs_lists(other_items_list):\n\n combos.append([pair] + rest)\n\n return combos",
"def tripletGenerator(S):\n for a in S:\n for b in S:\n for c in S:\n yield (a, b, c)",
"def combinations(*args: List[Any]) -> List[List]:\n return list([list(el) for el in list(product(*args))])",
"def pairwise(iterable):\n a = iter(iterable)\n return zip(a, a)",
"def pairwise(s: List[Any]) -> Iterator[Tuple[Any, Any]]:\n\n a, b = itertools.tee(s)\n next(b, None)\n return zip(a, b)",
"def pairs(iterable):\n previous = None\n for item in iterable:\n current = item\n if previous is not None:\n yield previous, current\n previous = current",
"def generate_pairs(number: int) -> List[List[int]]:\n return [\n [top, inner]\n for top in range(number + 1)\n for inner in range(top, number + 1)\n ]",
"def generate_pairs_of_words(word_list):\n def pair_words(word_list, i, j, connector):\n return word_list[i] + connector + word_list[j]\n pairs = []\n n = len(word_list)\n for i in range(n-1):\n for j in range(i+1, n):\n pairs.append(pair_words(word_list, i, j, ' '))\n pairs.append(pair_words(word_list, j, i, ' '))\n pairs.append(pair_words(word_list, i, j, '-'))\n pairs.append(pair_words(word_list, j, i, '-'))\n pairs.append(pair_words(word_list, i, j, '_'))\n pairs.append(pair_words(word_list, j, i, '_'))\n pairs.append(pair_words(word_list, i, j, ''))\n pairs.append(pair_words(word_list, j, i, ''))\n outputs = list(set(pairs)) # remove duplicates\n return outputs",
"def pairwise(iterable):\n a, b = itertools.tee(iterable)\n next(b, None)\n return list(zip(a, b))"
] | [
"0.7500274",
"0.6851043",
"0.6840614",
"0.68141717",
"0.6754132",
"0.669949",
"0.6683781",
"0.6671177",
"0.6668704",
"0.65523124",
"0.64457446",
"0.64399433",
"0.6394851",
"0.636628",
"0.6352524",
"0.6351291",
"0.6324047",
"0.6315178",
"0.6308765",
"0.63008136",
"0.62359875",
"0.62253916",
"0.62190825",
"0.6217203",
"0.6198219",
"0.61822855",
"0.61587",
"0.61572254",
"0.6155779",
"0.6145708"
] | 0.8119166 | 0 |
Serialize the internal key-value pair to a byte array, pickling objects only when necessary. | def serialize(self):
byte_array = bytearray()
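        # 8-byte header: bit 63 flags a PUT entry, the lower bits carry the sequence number.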
header = (
self.sequence_number | (1 << 63)
if self.type == KeyType.PUT
else self.sequence_number
)
# append header first
byte_array.extend(byte_utils.integer_to_n_bytes_array(header, 8))
pickle_key = pickle.dumps(self.key)
# key length
byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_key)))
# key byte array
byte_array.extend(pickle_key)
# it is a put operation, value is needed
if self.type == KeyType.PUT:
pickle_value = pickle.dumps(self.value)
# value length
byte_array.extend(byte_utils.integer_to_four_bytes_array(len(pickle_value)))
# value byte array
byte_array.extend(pickle_value)
return bytes(byte_array) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def serialize(self, value) -> bytes:\n pass",
"def _encode_value(self, value):\n return pickle.dumps(value)",
"def serialize(obj):\n return pickle.dumps(obj)",
"def dump_object(self, value):\n return pickle.dumps(value)",
"def __bytes__(self):\n byteout = bytearray()\n for index in range(1, 15):\n key = \"d\" + str(index)\n if self._user_data.get(key) is not None:\n byteout.append(self._user_data[key])\n else:\n byteout.append(0x00)\n return bytes(byteout)",
"def to_bytes(self, ???):",
"def serialize(self, value: VALUE) -> bytes:\n raise NotImplementedError",
"def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)",
"def serialize(obj):\n return np.fromstring(pickle.dumps(obj), dtype=np.uint8).astype(np.float32)",
"def serialize(self) -> bytes:\n return pickle.dumps(self)",
"def serialize(self) -> bytes:\n pass",
"def serialize(self) -> bytes:\n pass",
"def dumps_value(obj):\n type_key = type_keys.Value.assign(obj)\n\n if type_key == type_keys.Value.INTEGER:\n binary_data = struct.pack(\"!q\", obj)\n elif type_key == type_keys.Value.FLOAT:\n binary_data = struct.pack(\"!d\", obj)\n elif type_key == type_keys.Value.COMPLEX:\n binary_data = struct.pack(formats.COMPLEX_PACK, obj.real, obj.imag)\n elif type_key == type_keys.Value.NUMPY_OBJ:\n binary_data = common.data_to_binary(obj, np.save)\n elif type_key == type_keys.Value.STRING:\n binary_data = obj.encode(common.ENCODE)\n elif type_key in (type_keys.Value.NULL, type_keys.Value.CASE_DEFAULT):\n binary_data = b\"\"\n elif type_key == type_keys.Value.PARAMETER_VECTOR:\n binary_data = common.data_to_binary(obj, _write_parameter_vec)\n elif type_key == type_keys.Value.PARAMETER:\n binary_data = common.data_to_binary(obj, _write_parameter)\n elif type_key == type_keys.Value.PARAMETER_EXPRESSION:\n binary_data = common.data_to_binary(obj, _write_parameter_expression)\n else:\n raise exceptions.QpyError(f\"Serialization for {type_key} is not implemented in value I/O.\")\n\n return type_key, binary_data",
"def from_value(value):\n return pickle.dumps(value)",
"def __bytes__(self):\n with BytesIO() as b:\n self.save(b)\n return b.getvalue()",
"def to_bytes(self) -> bytes:",
"def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)",
"def serialize(self, data):",
"def get_dict_of_bytes2(self):\n pass",
"def _serialize_array(self, array):\n buffer = io.BytesIO()\n np.save(buffer, array)\n return buffer.getvalue()",
"def __array__(self):\n return dict2rec(self)",
"def dic_pickle_dumps_and_b64(data):\n for i in data:\n data[i] = base64.b64encode(pickle.dumps(data[i]))\n return data",
"def encode(self):\r\n # Create dict from attributes. Maintain added order\r\n #jd = {'txpk': collections.OrderedDict()}\r\n jd = {'txpk':{}}\r\n\r\n for key in self.keys:\r\n val = getattr(self, key)\r\n\r\n if val is not None:\r\n if key == 'data':\r\n jd['txpk'][key] = val.decode('utf-8')\r\n else:\r\n jd['txpk'][key] = val\r\n #print('key',key)\r\n #print('valtype',type(val),val) \r\n #print(jd)\r\n \r\n return dumps(jd, separators=(',', ':'))",
"def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())",
"def deserialize(arr):\n return pickle.loads(arr.astype(np.uint8).tobytes())",
"def serialize(self):",
"def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)",
"def ToBytes(self, value) -> bytes:\n pass",
"def encode(Value):\n return base64.b64encode(zlib.compress(pickle.dumps(Value),9))",
"def _store(self):\n store_dict = {}\n for key, val in self._data.items():\n store_dict[key] = pickle.dumps(val, protocol=self.v_protocol)\n store_dict[PickleResult.PROTOCOL] = self.v_protocol\n return store_dict"
] | [
"0.64888334",
"0.6360432",
"0.6315611",
"0.6277576",
"0.6177769",
"0.6171004",
"0.6164912",
"0.6144653",
"0.6144653",
"0.61439574",
"0.6126626",
"0.6126626",
"0.60924256",
"0.60020936",
"0.59987134",
"0.59793663",
"0.5974535",
"0.59639865",
"0.5963505",
"0.5941758",
"0.59316015",
"0.5917161",
"0.5906177",
"0.58746827",
"0.58746827",
"0.58694506",
"0.5840877",
"0.5814872",
"0.5797925",
"0.57812107"
] | 0.7145172 | 0 |
Import ASHRAE data from a directory containing the .csv files. | def import_data(ashrae_dir, filenames=const.NAMES):
print('Importing data from csv')
ashrae_dir = pathlib.Path(ashrae_dir)
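    # Read each <name>.csv in the directory into its own DataFrame, keyed by name.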
data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv')) for name in filenames}
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_csv(self, path):\n for file in os.listdir(path):\n if file[-4:] == \".csv\":\n name = file[:-4]\n table_index_header = cfg.get_list(\"table_index_header\", name)\n filename = os.path.join(path, file)\n self.input_data[name] = pd.read_csv(\n filename,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in name),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self",
"def load_data(path):\n\n columns = ['Item Year', 'Original Value', 'Standard Value', 'Original Currency',\n 'Standard Currency', 'Orignal Measure', 'Standard Measure', 'Location',\n 'Commodity']\n col_type = [int, float, float, object, object, object, object, object]\n\n col_type_dict = dict(zip(columns, col_type))\n\n au_df = pd.read_csv(path, usecols=columns)\n au_df = au_df.astype(col_type_dict)\n au_df.name = 'AU_data'\n \n return au_df, columns",
"def data_import(path):\n train_path = os.path.join(path, \"train.csv\")\n test_path = os.path.join(path, \"test.csv\")\n df_train = pd.read_csv(train_path)\n df_test = pd.read_csv(test_path)\n return df_train, df_test",
"def loadCSV(input_file):",
"def import_csv_data(cr, registry):\n files = ['data/sc.info.csv']\n for file in files:\n tools.convert_file(cr, 'prospects_app', file, None,\n mode='init', noupdate=True, kind='init')",
"def _read_csv(self):\n self.function_name = '_read_csv'\n with open(os.path.join(self.task.downloads, self.csv_name)) as csv_file:\n reader = csv.reader(csv_file, dialect='excel')\n for row in reader:\n self.input_data.append(row)",
"def import_ag_data(data_csv):\n df = pd.read_csv(data_csv)\n col_to_drop = ['Program', 'Period', 'Week Ending', 'Geo Level', 'State',\n 'State ANSI', 'Zip Code', 'Region', 'watershed_code',\n 'Watershed', 'Data Item', 'Domain', 'Domain Category',\n 'Ag District', 'Ag District Code', 'CV (%)']\n df = df.drop(col_to_drop, axis=1)\n df = df[(df['Value'] != ' (D)') & (df['Value'] != ' (Z)')]\n df = df.replace(to_replace=r',', value='', regex=True)\n df['Value'] = df['Value'].astype('int')\n df = df.rename(columns={'Value': 'Yield'})\n df['Year'] = pd.to_datetime(df['Year'], format='%Y')\n return df",
"def import_data():\n data = pd.read_csv('partA/bikes_October18.csv', ',')\n return data",
"def from_csv(self):\n timestamp_logname = \"from_csv_\" + datetime.today().strftime('%Y_%m_%d_%H_%M_%S')\n csv_files = [f for f in self.args.files if f.endswith('.csv')]\n if not csv_files:\n self.logger.error(\"No CSV files found.\")\n return False\n\n # Create an instance of the Ingestor class with common options set.\n ingestor = Ingestor(**self.options)\n\n # Ingest from each CSV file.\n for csv_file in csv_files:\n data_groups = Ingestor.process_csv(csv_file)\n for mask, routes, deployment_number in data_groups:\n ingestor.load_queue(mask, routes, deployment_number)\n ingestor.ingest_from_queue()\n\n # Write out any failed ingestions from the entire batch to a new CSV file.\n if ingestor.failed_ingestions:\n ingestor.write_failures_to_csv(timestamp_logname)\n\n self.logger.info('')\n self.logger.info(\"Ingestion completed.\")\n return True",
"def importData(filename):\r\n data = pd.read_csv(filename)\r\n return data",
"def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")",
"def from_csv_to_database():\r\n for year, path in FileNamePath.items():\r\n # load csv files\r\n with open(path, encoding='cp1251') as dataset:\r\n print(f\"Download {year} data\")\r\n get_curr_data(dataset, year)",
"def import_data(csv_file):\n # skips bad lines\n data = pd.read_csv(csv_file, error_bad_lines=False)\n return data",
"def import_hydx(hydx_path):\n hydx = Hydx()\n\n hydxcsvfiles = [\n \"Debiet.csv\",\n \"ItObject.csv\",\n \"Knooppunt.csv\",\n \"Kunstwerk.csv\",\n \"Meta.csv\",\n \"Nwrw.csv\",\n \"Oppervlak.csv\",\n \"Profiel.csv\",\n \"Verbinding.csv\",\n \"Verloop.csv\",\n ]\n implementedcsvfiles = [\n \"Debiet.csv\",\n # \"ItObject1.csv\",\n \"Knooppunt.csv\",\n \"Kunstwerk.csv\",\n # \"Meta1.csv\",\n # \"Nwrw1.csv\",\n \"Oppervlak.csv\",\n \"Profiel.csv\",\n \"Verbinding.csv\",\n \"Verloop.csv\",\n ]\n\n existing_files = []\n for f in hydxcsvfiles:\n csvpath = os.path.join(hydx_path, f)\n if not os.path.isfile(csvpath):\n logger.warning(\n \"The following hydx file could not be found: %s\",\n os.path.abspath(csvpath),\n )\n elif f not in implementedcsvfiles:\n logger.warning(\n \"The following hydx file is currently not implemented in this importer: %s\",\n csvpath,\n )\n else:\n existing_files.append(f)\n\n # TODO check if number of csvfiles loaded is same as number inside meta1.csv\n\n for f in existing_files:\n csvpath = os.path.join(hydx_path, f)\n with open(csvpath, encoding=\"utf-8-sig\") as csvfile:\n csvreader = csv.DictReader(csvfile, delimiter=\";\")\n hydx.import_csvfile(csvreader, f)\n\n hydx.check_import_data()\n\n return hydx",
"def load_ae(self, year):\n ae_paths = list(pathlib.Path(config.AE_DIR).glob(f'{year}*ae.txt'))\n assert len(ae_paths) == 1, (f'No AE files found.\\nae_dir={config.AE_DIR}, '\n f'year={year}, ae_paths={ae_paths}')\n ae_data = pd.read_csv(ae_paths[0], sep=' ', index_col=0, \n parse_dates=True, comment='#', \n names=['dateTime', 'AE'])\n return ae_data",
"def importAll():\n csvFile = openCsv()\n items = [] # chooseKey, count, grade, keyType, mainCategory, mainKey,\n # name, pricePerOne, subCategory, subKey, totalTradeCount,\n # mainLabel, subLabel, description\n\n with open(csvFile) as i:\n readItem = csv.reader(i)\n itemRow = next(readItem)\n for row in readItem:\n items.append(row)\n\n return items",
"def load_data(path):\n filenames = glob.glob(path + \"/*\")\n \n events = DataFrame()\n data = []\n \n for f in filenames:\n data.append(pandas.read_csv(f, index_col=None, header=0))\n \n events = pandas.concat(data)\n return events",
"def ImportFromCsvFile(self, csvfilename):\n stories = []\n with open(csvfilename, 'r') as content_file:\n delimiter=','\n quotechar='\"'\n if sys.version_info[0] == 2: #python2\n delimiter = delimiter.encode(\"ascii\")\n quotechar = quotechar.encode(\"ascii\")\n\n reader = csv.reader(content_file, delimiter=delimiter, quotechar=quotechar)\n headerrow = None\n for row in reader:\n if headerrow is None:\n headerrow = row\n continue\n story = Story.FromCsv(row)\n self.stories.append(story)",
"def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )",
"def ingest_rental_csv(csv_path):\n # Create a CSV import generator (next yields one db row)\n import_generator = import_csv_gen(csv_path)\n # Skip over the title row\n next(import_generator)\n # Iterate over all other rows\n while True:\n try:\n data = next(import_generator)\n if len(data) != 2:\n logger.error(f'Data with incorrect item count: {len(data)}')\n continue\n # extract items from list and add document to database\n with Connection():\n rental = Rental(\n product_id=data[RENTAL_PROD_ID],\n user_id=data[RENTAL_USER_ID]\n )\n rental.save() # This will perform an insert\n except StopIteration:\n break",
"def import_files_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)",
"def process_file_import(self):\r\n directory_csv = [file for file in os.listdir() if file.endswith(\".csv\")]\r\n self.print_options(directory_csv,2)\r\n\r\n \"\"\"\r\n Asks for user input. Then imports csv file based on user's input.\r\n \"\"\"\r\n n = (input(\"Which csv would you like to import? Please input the corresponding integer:\"))\r\n\r\n try:\r\n n = int(n)\r\n except:\r\n pass\r\n\r\n if isinstance(n, int) is True and n <= len(directory_csv):\r\n self.population.import_csv(directory_csv[int(n)-1])\r\n print(self.population)\r\n self.file_import()\r\n elif n == 'q':\r\n quit()\r\n elif n == 'b':\r\n self.menu_page()\r\n else:\r\n raise InputError(\"\\nPlease input a valid digit, 'q' or 'b'\")",
"def import_data():\n import pandas as pd\n \n df = pd.read_csv('Company_Bankruptcy_Prediction.csv')\n return df",
"def importAllDatasets(directory):\n head_index = findIndex(temp_list, \"Gaze\")\n point_index = findIndex(temp_list, \"Point\")\n grab_index = findIndex(temp_list, \"Grab\")\n pos_index = findIndex(temp_list, \"Position\")\n\n head_data = pd.read_csv(temp_list[head_index]) if head_index != None else None\n point_data = pd.read_csv(temp_list[point_index]) if point_index != None else None\n grab_data = pd.read_csv(temp_list[grab_index]) if grab_index != None else None\n pos_data = pd.read_csv(temp_list[pos_index]) if pos_index != None else None\n\n\n return head_data, point_data, grab_data, pos_data",
"def load_data(csv_path):\n df = pd.read_csv(csv_path)\n return df",
"def import_csv(self, csvfileobject):\n # Clear previously stored info\n self._tracks = []\n self._selected = None\n\n for row in csvfileobject:\n if row[0] == \"T\":\n track = self.addTrack()\n track.properties = row\n elif row[0] == \"P\":\n period = self.addPeriod([0,1,'-'])\n period.properties = row",
"def load_data_csv():\r\n \r\n # Load lookup table\r\n path = 'data/id_lookup.csv'\r\n lookup_table = pd.read_csv(path, index_col=0)\r\n\r\n # Load song data\r\n path2 = 'data/data_lyrics_features.csv'\r\n data = pd.read_csv(path2, index_col=0)\r\n\r\n return data, lookup_table",
"def import_data():\n\tif os.path.exists(\"log.csv\"):\n\t\t#print (\"--training data imported to data frame\\n\")\n\t\tdf = pd.read_csv(\"log.csv\", index_col=0)\n\telse:\n\t\tprint(\"training CSV not found\")\n\t\texit()\n\t\n\treturn df",
"def data_from_csv(self, filepath):\n self.dataframe = pd.load_csv(filepath, separator='')",
"def _import_users(admin_access_token, users_csv_file):\n admin = User.query.filter_by(id_=ADMIN_USER_ID).one_or_none()\n if admin_access_token != admin.access_token:\n raise ValueError(\"Admin access token invalid.\")\n csv_reader = csv.reader(users_csv_file)\n for row in csv_reader:\n user = User(\n id_=row[0],\n email=row[1],\n access_token=row[2],\n username=row[3],\n full_name=row[4],\n )\n Session.add(user)\n Session.commit()"
] | [
"0.64119494",
"0.62574255",
"0.6136558",
"0.60596466",
"0.6012882",
"0.5962187",
"0.5958087",
"0.5912588",
"0.58974314",
"0.5878545",
"0.58628714",
"0.5797354",
"0.57774633",
"0.577703",
"0.5762553",
"0.57592994",
"0.57388955",
"0.57192475",
"0.57123953",
"0.5707139",
"0.56904113",
"0.5681002",
"0.56616414",
"0.56305915",
"0.561907",
"0.5616299",
"0.5616297",
"0.56127244",
"0.5609329",
"0.56071156"
] | 0.7007926 | 0 |
Import ASHRAE data with optional caching mechanism. | def get_raw_data(ashrae_dir, cache_file=None, filenames=const.NAMES):
    # Convert to a Path only when a cache file is given; pathlib.Path(None) would raise.
    cache_file = pathlib.Path(cache_file) if cache_file is not None else None
    if cache_file is not None and cache_file.exists():
        data = import_dict_from_cached(cache_file, filenames)
    else:
        data = import_data(ashrae_dir)
        if cache_file is not None:
            _cache_data(data, cache_file)
    # Sanity check: the set of building ids should be the same in the train and test sets.
    assert set(data['train'].building_id) == set(data['test'].building_id)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_(self, data):\n return self.__import(data)",
"def load_data(self) -> None:",
"def load_data(data_set_key: str, a2e_data_path: str = '../../../a2e-data/data', cache_dir: str = None) -> BearingDataSet:\n\n if a2e_data_path is not None and not a2e_data_path.startswith('http') and not a2e_data_path.startswith('file://'):\n if os.path.isabs(a2e_data_path):\n a2e_data_path = 'file://' + os.path.abspath(a2e_data_path)\n else:\n bearing_module_path = pathlib.Path(__file__).parent.absolute()\n absolute_data_path = os.path.abspath(os.path.join(bearing_module_path, a2e_data_path))\n if os.name == 'nt':\n absolute_data_path = f'/{absolute_data_path}'.replace('\\\\', '/')\n\n a2e_data_path = 'file://' + absolute_data_path\n\n if not os.path.isdir(a2e_data_path.replace('file://', '')):\n a2e_data_path = 'https://github.com/maechler/a2e-data/raw/master/data/'\n\n if cache_dir is None:\n cache_dir = os.path.join(Path.home(), '.a2e')\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n a2e_data_path = a2e_data_path.rstrip('/') + '/'\n data_set_description_origin = f'{a2e_data_path}{data_set_key}.yaml'\n data_set_origin = f'{a2e_data_path}{data_set_key}.csv.gz'\n data_set_description_path = get_file(data_set_key + '.yaml', origin=data_set_description_origin, cache_dir=cache_dir, cache_subdir='datasets/bearing')\n windows = {}\n\n with open(data_set_description_path) as data_set_description_file:\n data_set_description = yaml.load(data_set_description_file, Loader=yaml.FullLoader)\n data_set_path = get_file(data_set_key + '.csv.gz', origin=data_set_origin, cache_dir=cache_dir, cache_subdir='datasets/bearing', file_hash=data_set_description['data']['md5_hash'], hash_algorithm='md5')\n\n with gzip.open(data_set_path, mode='rt') as data_set_file:\n data_frame = pd.read_csv(data_set_file, parse_dates=[data_set_description['data']['index_column']], date_parser=lambda x: timestamp_to_date_time(float(x)), quotechar='\"', sep=',')\n data_frame = data_frame.set_index(data_set_description['data']['index_column'])\n\n for window_key, window_description in data_set_description['windows'].items():\n windows[window_key] = {\n 'mask': (data_frame.index > window_description['start']) & (data_frame.index <= window_description['end']),\n 'label': window_description['label'],\n }\n\n return BearingDataSet(data_set_key, data_frame, windows)",
"def _loadData(self):\n self.d = read_ac_data.read_ac_data_wrapper(self.sc_id, self.date,\n dType='10Hz')\n return",
"def data_airline():\n return load_airline()",
"def import_data_helper(self): \n if len(self.components) == 1:\n hapi.fetch(TableName = self.tablename, M = self.components[0][0], I = self.components[0][1], numin = self.min_x, numax = self.max_x)\n else: \n global_id = []\n for c in self.components:\n global_id.append(hapi.ISO[c][0])\n hapi.fetch_by_ids(TableName = self.tablename, iso_id_list = global_id, numin = self.min_x, numax = self.max_x)",
"def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()",
"def import_data(ashrae_dir, filenames=const.NAMES):\n print('Importing data from csv')\n ashrae_dir = pathlib.Path(ashrae_dir)\n data = {name: pd.read_csv((ashrae_dir / name).with_suffix('.csv')) for name in filenames}\n\n return data",
"def load_data(self):",
"def _import(self, data):\n if isinstance(data, dict):\n if len(data):\n for key in data:\n if data.get(key) is not None:\n if not self.set(key, data.get(key)):\n raise Exception('%s %s icin dogru bir veri degil.' % (data.get(key), key))",
"def _load_data(self):\n\n if not self._cache.exists(config.DATAFRAME_SONG_DATA):\n source_path = os.path.join(config.S3_SONG_DATA, 'A/A/A/*.json') # Note: song database is way big, so we get only a slice of it.\n dataframe = self._get_spark_session().read.json(source_path)\n self._cache.set_source(config.DATAFRAME_SONG_DATA, dataframe)",
"def import_data(self, data):\n # Import additional data for tuning\n # data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n pass",
"def test_import_data():\n\n file_path = os.path.join(CONST_ADVANTICSYS_DIR, CONST_ADVANTICSYS_TEST_1)\n\n # Bring df\n success, log, test_ingress_df = advanticsys_import(file_path)\n assert success, log\n assert isinstance(test_ingress_df, pd.DataFrame)\n\n # Test import function\n success, log = import_data(\n test_ingress_df,\n CONST_ADVANTICSYS,\n SQL_USER,\n SQL_PASSWORD,\n SQL_HOST,\n SQL_PORT,\n SQL_TEST_DBNAME\n )\n\n assert success is True, log \n assert log == \"New: 0 (uploaded); Duplicates: 75 (ignored)\"",
"def load_apc(self, apc_path):\n self.apc = pd.read_pickle(os.path.join(self.data_path, 'apc.pick'))",
"def action_import(self):\n ctx = self._context\n \n data = base64.b64decode(self.data)\n file_input = cStringIO.StringIO(data)\n file_input.seek(0)\n reader_info = []\n if self.delimeter:\n delimeter = str(self.delimeter)\n else:\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n try:\n reader_info.extend(reader)\n except Exception:\n raise exceptions.Warning(_(\"Not a valid file!\"))\n keys = reader_info[0]",
"def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()",
"def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item",
"def import_sitefinder_data(path):\n asset_data = []\n\n site_id = 0\n\n with open(os.path.join(path), 'r') as system_file:\n reader = csv.DictReader(system_file)\n next(reader, None)\n for line in reader:\n if line['Operator'] != 'Airwave' and line['Operator'] != 'Network Rail':\n # if line['Operator'] == 'O2' or line['Operator'] == 'Vodafone':\n # if line['Anttype'] == 'MACRO' or \\\n # line['Anttype'] == 'SECTOR' or \\\n # line['Anttype'] == 'Sectored' or \\\n # line['Anttype'] == 'Directional':\n asset_data.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [float(line['X']), float(line['Y'])]\n },\n 'properties':{\n 'name': 'site_' + str(site_id),\n 'Operator': line['Operator'],\n 'Opref': line['Opref'],\n 'Sitengr': line['Sitengr'],\n 'Antennaht': line['Antennaht'],\n 'Transtype': line['Transtype'],\n 'Freqband': line['Freqband'],\n 'Anttype': line['Anttype'],\n 'Powerdbw': line['Powerdbw'],\n 'Maxpwrdbw': line['Maxpwrdbw'],\n 'Maxpwrdbm': line['Maxpwrdbm'],\n 'Sitelat': float(line['Sitelat']),\n 'Sitelng': float(line['Sitelng']),\n }\n })\n\n site_id += 1\n\n else:\n pass\n\n return asset_data",
"def _load_cache(self):\n logger.debug(\"Loading coherence data for %s from cache\", self.w1)\n\n assert self.variant_unit is None, \"Cannot load from cache once variant_unit has been set\"\n with open(self._cache_key) as f:\n self.rows = json.load(f)\n\n self._already_generated = True\n logger.debug(\"Loaded {} rows from cache ({})\".format(len(self.rows), self._cache_key))",
"def importer():\n pass",
"def load_data(\n cache_dir: Optional[str] = None,\n) -> tuple[client_data.ClientData, client_data.ClientData]:\n database_path = download.get_compressed_file(\n origin='https://storage.googleapis.com/tff-datasets-public/shakespeare.sqlite.lzma',\n cache_dir=cache_dir,\n )\n train_client_data = sql_client_data.SqlClientData(\n database_path, split_name='train'\n ).preprocess(_add_parsing)\n test_client_data = sql_client_data.SqlClientData(\n database_path, split_name='test'\n ).preprocess(_add_parsing)\n return train_client_data, test_client_data",
"def importAbc(self, parent_transform=True):\n self.logger.info(\"Import Alembic\")\n\n if self.data['cacheFileNameAttr'] != '':\n if os.path.isfile(self.data['cacheFileNameAttr']):\n\n # now try the abcImport\n try:\n if parent_transform:\n cmds.AbcImport(self.data['cacheFileNameAttr'], reparent=self.data['transformNode'])\n self.logger.debug(\"Parenting to %s \" % self.data['transformNode'])\n else:\n cmds.AbcImport(self.data['cacheFileNameAttr']) \n\n self.logger.info(\"Imported : %s\" % self.data['cacheFileNameAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Alembic Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['cacheFileNameAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.cacheFileNames\" % self.data['shapeNode'])\n return False",
"def on_import(self, event=None):\n if event is not None:\n event.Skip()\n data_id, theory_id, state_id = self.set_data_helper()\n temp = data_id + state_id\n self.parent.set_data(data_id=temp, theory_id=theory_id)",
"def import_data(self, keyname, data):\n return self.database.jsonset(keyname, Path.rootPath(), data)",
"def __init__(self, filename = None, dbalias = None, hltpskey = None ):\n super(HLTPrescalesSetAccess,self).__init__( ConfigType.HLTPS, mainkey = \"prescales\",\n filename = filename, dbalias = dbalias, dbkey = hltpskey )\n self.loader.setQuery([\n \"SELECT HPS_DATA FROM {schema}.HLT_PRESCALE_SET WHERE HPS_ID={dbkey}\" # for current and new db schema\n ])\n self.load()",
"def dataLoad():\n try:\n try: #Python3\n f = open(__file__ + \".csv\",\"rt\")\n except: #Python2\n f = open(__file__ + \".csv\",\"rb\")\n data = f.read().split(',')\n entryCol.entry0.delete(0,END)\n entryCol.entry0.insert(0,data[0])\n entryCol.entry1.delete(0,END)\n entryCol.entry1.insert(0,data[1])\n entryCol.entry2.delete(0,END)\n entryCol.entry2.insert(0,data[2])\n entryCol.entry3.delete(0,END)\n entryCol.entry3.insert(0,data[3])\n botWind.writeN(\"DataLoad: File\")\n except:\n botWind.writeN(\"DataLoad: Default\")",
"def load_data(self):\n raise NotImplementedError()",
"def importAovs(self):\n\t\tLayersInfo = pickle.load( open( self.aovsPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tfor ao in LayersInfo.keys():\n\t\t\taov.create( ao, LayersInfo[ao]['name'], LayersInfo[ao]['type'], LayersInfo[ao]['enabled'] )\n\t\tmc.refresh( su = 0 )",
"def load_data(self, read_shelf):\n if read_shelf:\n try:\n # Attempt reading pre-shelved objects first\n self.__read_shelf()\n except Exception as e:\n print(f'Exception while reading the data shelf ({e})')\n # Otherwise, read data from the the json files\n self.__read_json()\n else:\n self.__read_json()",
"def loadData(self, file):\n self.data = batchImport(file, self.ps)"
] | [
"0.5765304",
"0.5686181",
"0.56845134",
"0.56063354",
"0.56054366",
"0.55914676",
"0.5575657",
"0.55459553",
"0.5527377",
"0.54661703",
"0.5434593",
"0.54312104",
"0.5410122",
"0.5381183",
"0.5379974",
"0.5346656",
"0.5326773",
"0.5326303",
"0.52563024",
"0.52543855",
"0.5239716",
"0.5209885",
"0.51924276",
"0.51161706",
"0.51098263",
"0.51079005",
"0.5107688",
"0.50871587",
"0.50703907",
"0.506887"
] | 0.61214995 | 0 |
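A minimal usage sketch for get_raw_data above, assuming the module-level helpers it relies on (import_data, _cache_data, import_dict_from_cached) and const.NAMES exist as in the source; the directory and cache paths below are hypothetical.

# Hypothetical paths; pass cache_file=None to always re-read the raw CSVs.
raw = get_raw_data(ashrae_dir='data/ashrae', cache_file='data/cache/ashrae.pkl')
print(raw['train'].shape, raw['building_metadata'].shape)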
Return the number of missing timestamps | def count_missing_timestamps(df):
    no_of_timestamps = len(df.timestamp)
    no_of_sites = len(set(df.site_id))
    full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
    no_of_missing_timestamps = no_of_sites * len(full_date_range) - no_of_timestamps
    print(f'There are {no_of_timestamps} timestamps in the data. The full date range is {len(full_date_range)} '
          f'hours long and there are {no_of_sites} sites, so there should be {no_of_sites * len(full_date_range)} '
          f'timestamps in the data. {no_of_missing_timestamps} are therefore missing.')
return no_of_missing_timestamps | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_no_missing_timesteps(timesteps, verbose=True):\n timesteps = _check_timesteps(timesteps)\n # Check if there are data\n if timesteps.size == 0:\n raise ValueError(\"No data available !\")\n # Check if missing timesteps\n dt = np.diff(timesteps)\n dts, counts = np.unique(dt, return_counts=True)\n if verbose:\n print(\" --> Starting at\", timesteps[0])\n print(\" --> Ending at\", timesteps[-1])\n if len(counts) > 1:\n print(\"Missing data between:\")\n bad_dts = dts[counts != counts.max()]\n for bad_dt in bad_dts:\n bad_idxs = np.where(dt == bad_dt)[0]\n bad_idxs = [b.tolist() for b in bad_idxs]\n for bad_idx in bad_idxs:\n tt_missings = timesteps[bad_idx : (bad_idx + 2)]\n print(\"-\", tt_missings[0], \"and\", tt_missings[1])\n raise ValueError(\"The process has been interrupted\")\n return",
"def calculateMissing(odf):\n df = odf.copy()\n # Calculate last minute of operation for each day in `df`\n df.loc[:, 'time'] = np.nan\n df.loc[:, 'time'] = df.index.astype(np.int64)//10**9 # (to unix timestamp) from nano seconds 10*9 to seconds\n days = df.groupby(df.index.date)['time'].agg(['min', 'max', 'count']) # aggreagate on groupby\n # total number of minutes on the day\n totalminday = (days['max']-days['min'])//60\n # minutes with data by day\n countminday = days['count'] # -1 due count is +1\n missminday = totalminday-countminday\n percmissminday = missminday/totalminday\n\n # print('not working on daemon just on jupyter notebook!!!')\n return np.mean(percmissminday) # average of missing minutes",
"def test_count_when_data_is_not_present(self):\n\n temp_data = []\n\n tt = TemperatureTracker()\n result = tt.count_from(temp_data)\n self.assertEqual(result, 0)",
"def __len__(self):\n if self.first_timestamp is None or self.last_timestamp is None:\n return 0\n return int(\n (self.last_timestamp - self.first_timestamp).total_seconds()\n ) // self.interval + 1",
"def timestamp_length(self) -> int:\n timestamps = self.timestamps_sorted_list()\n base_length = computation.num_digits(timestamps[0]) if len(timestamps) > 0 else -1\n indexes = [1, 2, 3, 4, 5, -1, -2, -3, -4] if len(timestamps) > 10 else list(range(1, len(timestamps)))\n for n in indexes:\n length = computation.num_digits(timestamps[n])\n if length != base_length:\n return -1\n return base_length",
"def N(self):\n return len(self.time)",
"def count_missing_stats(manifest):\n num_missing = 0\n for element in manifest:\n if element.missing_stats():\n num_missing += 1\n return num_missing",
"def test_load_points_times_length():\n df = leiap.get_points_times(warn='disable')\n assert df.shape[0] > 0",
"def get_missing(self):\n return self.serie.isna().sum()",
"def test_timestamp_spacing_one_missing(times):\n assert_series_equal(\n time.spacing(times[[0, 2, 3]], times.freq),\n pd.Series([True, False, True], index=times[[0, 2, 3]])\n )",
"def count_placeholders(series):\n count = 0\n\n for i in range(series.size-1, -1, -1):\n if pd.isnull(series[i]) or series[i] == 0:\n count += 1\n else:\n break\n\n return count",
"def get_missing_test_numbers(a_dict, logged_test_numbers):\n tnum_list = []\n for tname, t_dict in a_dict.iteritems():\n for tpin, r_dict in t_dict.iteritems():\n tnum_list.append(int(r_dict[\"Test number\"]))\n missing = list(set(tnum_list) - set(logged_test_numbers))\n return missing, tnum_list",
"def test_timestamp_not_found(self, l):\n extract_columns(data=self.data, columns=['a'], timestamps=['timestamp'])\n l.check(\n ('pynts.util', 'WARNING', \"Couldn't find timestamps '['timestamp']' in data, using 'ts' instead\"),\n )",
"def count(timeseries):\n try:\n return timeseries[0].points[0].value.int64_value\n except (IndexError, AttributeError) as exception:\n LOGGER.warning(\"Couldn't find any values in timeseries response\")\n LOGGER.debug(exception)\n return 0 # no events in timeseries",
"def get_naive_size(self) -> int:\n return (self.triples.time_end - self.triples.time_begin + 1).sum()",
"def missing_values():\n print('Missings in the train data:', train_data.isnull().sum())",
"def missing_reg(self):\n keys = []\n values = []\n count = [0] * 24\n\n for hour in self.data_file.buckets:\n for i in range(len(self.data_file.buckets[hour])):\n data_pt = self.data_file.buckets[hour][i]\n if data_pt['type'] == 'slow':\n time_before = self.data_file.buckets[hour][i - 1]['timestamp']\n time_slow = self.data_file.buckets[hour][i]['timestamp']\n if i != len(self.data_file.buckets[hour]) - 1:\n time_after = self.data_file.buckets[hour][i + 1]['timestamp']\n missing_reg_interval(keys, values, time_before, time_after, hour)\n else:\n missing_reg_interval(keys, values, time_before, time_slow, hour)\n if (time_slow - time_before) / float(Config.BOUNDARY) > 1:\n count[hour] += round((time_slow - time_before) / float(Config.BOUNDARY))\n missing_regular = dict(zip(keys, values))\n\n logger.info(f\"missing regular due to slow updates per hour: {count}\")\n logger.info(f\"missing regular due to slow updates: {missing_regular}\")\n logger.info(f\"total missing regular due to slow updates: {sum(count)}\")\n Config.ANALYSIS.write(\"\\n\")\n return missing_regular",
"def test_count_when_data_present(self):\n temp_data = [(1.00, time.localtime()), (2.00, time.localtime()),\n (3.00, time.localtime()), (4.00, time.localtime())]\n\n tt = TemperatureTracker(temp_data)\n result = tt.count_from(temp_data)\n self.assertEqual(result, 4)",
"def get_missing(self):\n missing_values = self.df[self.col_name].isnull().sum()\n return missing_values",
"def test_null_count(self):\n\n ld = Lambdata(self.df)\n num_nulls = ld.null_count()\n self.assertEqual(num_nulls, 3)",
"def find_missing(nums):\n # calculate sum of all elements\n # in input list\n sum_of_elements = sum(nums)\n\n # There is exactly 1 number missing\n n = len(nums) + 1\n actual_sum = (n * (n + 1)) / 2\n return actual_sum - sum_of_elements",
"def test_files_missing():\n\tfiledir = \"./goes_files/%Y_events/%Y%m%devents.txt\"\n\tt0 = timerange.start.datetime\n\tdays = [t0]\n\twhile timerange.end.datetime > t0:\n\t\tt0 = t0 + relativedelta(days=1)\n\t\tdays.append(t0)\n\n\tmissing_files = []\n\tfor d in days:\n\t\tif not os.path.exists(d.strftime(filedir)):\n\t\t\tmissing_files.append(d.strftime(filedir))\n\tprint(missing_files)",
"def num_failures(self):\n min_time = time.time() - self.window\n\n while self.failures and self.failures[0] < min_time:\n self.failures.popleft()\n\n return len(self.failures)",
"def test_time_supp_length_matches_no_timesteps(self):\n for no_timesteps in [5, 578, 993, 300072]:\n for dt in [0.1, 0.5, 3.0]:\n test_rec = rt.Recording(np.empty([6, no_timesteps, 1]), dt=dt)\n self.assertEqual(\n len(test_rec.time_supp),\n no_timesteps,\n 'Expected length of time_supp {} to match no_timesteps of '\n 'input {}.'.format(len(test_rec.time_supp), no_timesteps),\n )",
"def secondsCount(timestamp1, timestamp2):\n return timestamp1 - timestamp2",
"def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = dt[1]\r\n except:\r\n self.dt = 0.0",
"def reportnulls(self):\n self.null_counts = self.df.isnull().sum().sort_values(ascending=False)\n\n # return count of null values\n return self.null_counts",
"def check_trial_length(data, **_):\n # NaN values are usually ignored so replace them with Inf so they fail the threshold\n metric = np.nan_to_num(data[\"feedback_times\"] - data[\"goCue_times\"], nan=np.inf)\n passed = (metric < 60.1) & (metric > 0)\n assert data[\"intervals\"].shape[0] == len(metric) == len(passed)\n return metric, passed",
"def test_timestamp_spacing_too_frequent(times):\n assert_series_equal(\n time.spacing(times, '30min'),\n pd.Series([True] + [False] * (len(times) - 1), index=times)\n )",
"def __len__(self):\n return len(self._timeseriesData)"
] | [
"0.65900165",
"0.6369049",
"0.6316393",
"0.6256028",
"0.62290597",
"0.617701",
"0.61646646",
"0.6154375",
"0.60366976",
"0.6026818",
"0.6009626",
"0.5978013",
"0.59768033",
"0.5961701",
"0.5955014",
"0.58601505",
"0.5835052",
"0.58279943",
"0.5825557",
"0.5720708",
"0.5682689",
"0.5680005",
"0.56660974",
"0.56496996",
"0.5629173",
"0.5614725",
"0.5607036",
"0.5597966",
"0.55813426",
"0.5561822"
] | 0.8279923 | 0 |
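An illustrative call to count_missing_timestamps above on a tiny hourly frame; the column names follow the weather-data schema used in the source, and the example values are made up.

import pandas as pd

demo = pd.DataFrame({
    'site_id': [0, 0, 0],
    'timestamp': pd.to_datetime(['2016-01-01 00:00', '2016-01-01 02:00', '2016-01-01 03:00']),
})
count_missing_timestamps(demo)  # full hourly range is 4 timestamps for 1 site, 3 present -> returns 1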
Add missing timestamps to the weather data and interpolate to fill in the gaps; returns a df with the missing times and weather data filled in | def add_missing_weather_data(df):
full_date_range = pd.date_range(start=min(df.timestamp), end=max(df.timestamp), freq='H')
sites = list(set(df.site_id))
full_data_site_range = pd.DataFrame(itertools.product(sites, full_date_range),
columns=['site_id', 'timestamp'])
df_all_dates = full_data_site_range.merge(df, on=['site_id', 'timestamp'], how='left')
df_all_dates = df_all_dates.groupby('site_id').apply(lambda group: group.interpolate(limit_direction='both'))
return df_all_dates | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def auto_fillna(ts: TimeSeries,\n **interpolate_kwargs) -> TimeSeries:\n\n ts_temp = ts.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if 'limit_direction' not in interpolate_kwargs:\n interpolate_kwargs['limit_direction'] = 'both'\n interpolate_kwargs['inplace'] = True\n ts_temp.interpolate(**interpolate_kwargs)\n\n return TimeSeries.from_times_and_values(ts.time_index(), ts_temp.values)",
"def fill_missing_data_points(data):\n return data.interpolate()",
"def interpolate(df):\n for x in df.columns:\n if x == \"date\":\n continue\n df[x] = df[x].interpolate(method='linear', axis=0).ffill().bfill()\n return df",
"def _auto_fill(series: TimeSeries, **interpolate_kwargs) -> TimeSeries:\n\n series_temp = series.pd_dataframe()\n\n # pandas interpolate wrapper, with chosen `method`\n if \"limit_direction\" not in interpolate_kwargs:\n interpolate_kwargs[\"limit_direction\"] = \"both\"\n interpolate_kwargs[\"inplace\"] = True\n series_temp.interpolate(**interpolate_kwargs)\n return TimeSeries.from_dataframe(\n series_temp,\n freq=series.freq,\n static_covariates=series.static_covariates,\n hierarchy=series.hierarchy,\n )",
"def fill_weather_forecast_columns(df):\n\n filled_df = df.copy()\n filled_df.loc['2018-01-01','temp_KC':'wind_north_SD'] = filled_df.loc['2018-01-02','temp_KC':'wind_north_SD'].values\n filled_df.loc['2018-02-06','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-05','temp_KC':'wind_north_SD'].values\n filled_df.loc['2019-02-05','temp_KC':'wind_north_SD'] = filled_df.loc['2019-02-04','temp_KC':'wind_north_SD'].values\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='bfill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n\n any_nans = filled_df.isna().sum(axis=0)\n \n if any_nans.sum(axis=0) != 0:\n print('The function did not convert all NaNs. Some NaNs still exist.')\n\n return filled_df",
"def fill_test_weather_forecast_columns(df):\n\n filled_df = df.copy()\n filled_df.loc['2018-02-22','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-21','temp_KC':'wind_north_SD'].values\n filled_df.loc['2018-02-23','temp_KC':'wind_north_SD'] = filled_df.loc['2018-02-24','temp_KC':'wind_north_SD'].values\n # print(filled_df.loc['2018-02-21':'2018-02-24'])\n filled_df = filled_df.fillna(method='ffill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='bfill', limit=1)\n # print(filled_df.isna().sum(axis=0))\n filled_df = filled_df.fillna(method='ffill', limit=1)\n\n any_nans = filled_df.isna().sum(axis=0)\n \n if any_nans.sum(axis=0) != 0:\n print('The function did not convert all NaNs. Some NaNs still exist.')\n\n return filled_df",
"def build_full_temp(df_temperature):\n df_interp_full = build_missing(df_temperature)\n df_full = pd.concat([df_interp_full, df_temperature], ignore_index=True)\n df_full = df_full.sort_values(by=['datetime'])\n return df_full",
"def clean_meteo_data(self, df):\n for col in df.columns:\n df[col] = df[col].str.replace(',', '.').astype(\"float\")\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n df=df.fillna(method='ffill')\n# df_nan = df[df.isna().any(axis=1)]\n# print(\"Check Nans:\",df_nan.shape[0])\n# print(\"shape selected sensor data:\",df.shape)\n df=df.dropna()\n df=df.resample(\"10T\").mean()\n df=df.reset_index()\n df['dag']=df['datetime'].dt.day\n return df",
"def ts_resample(self):\n try:\n ts_freq = pd.DataFrame(\n index=pd.date_range(self.ts_df.index[0], self.ts_df.index[len(self.ts_df) - 1], freq=self.freq),\n columns=['dummy'])\n except ValueError:\n self._uvts_cls_logger.exception(\"Exception occurred, possibly incompatible frequency!\")\n sys.exit(\"STOP\")\n\n if self.fill_method == 'ffill':\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n self.ts_df.y = self.ts_df.y.fillna(method='ffill')\n # if np.isnan ( self.ts_df.y ).any ():\n # self.ts_df.y = self.ts_df.y.fillna ( method='bfill' )\n else: # interp\n xp = np.linspace(0, self.ts_df.size, self.ts_df.size, endpoint=False)\n fp = self.ts_df['y']\n # join\n self.ts_df = ts_freq.join(self.ts_df).drop(['dummy'], axis=1)\n # pick new points\n x = np.linspace(0, ts_freq.size, ts_freq.size, endpoint=False)\n x = x[self.ts_df['y'].isna()]\n print(x.size)\n print(x)\n\n # put the values\n self.ts_df.y[self.ts_df['y'].isna()] = np.interp(x, xp, fp)\n\n if np.isnan(self.ts_df.y).any():\n self._uvts_cls_logger.warning(\"Some NaN found, something went wrong, check the data!\")\n sys.exit(\"STOP\")\n\n self._uvts_cls_logger.info(\"Time series resampled at frequency: \" + str(self.ts_df.index.freq) +\n \". New shape of the data: \" + str(self.ts_df.shape))\n self._uvts_cls_logger.info(\"Using time series data of range: \" + str(min(self.ts_df.index)) + ' - ' + str(\n max(self.ts_df.index)) + \" and shape: \" + str(self.ts_df.shape))\n\n return self",
"def smart_gas_nan_checker(smart, gas, weather, dwelling_id):\n\n print('Resampling smart, gas, weather')\n # For more resampling info see: https://pandas.pydata.org/pandas-docs/stable/api.html#id41\n # Makes missing gaps appear as NaN, these are the general raw dataframes to work with\n smart_10s = smart.resample('10s').mean()\n gas_h = gas.resample('H').mean()\n weather_10min = weather.resample('10min').mean()\n\n \"\"\"\n Create a dataframe with a 1 hour sample rate\n \"\"\"\n gas_h['gasPower'] = gas_h['gasMeter'].diff() # Calculate gasPower column\n gas_h['gasPower'][0] = gas_h['gasPower'][1] # Replace 1st entry (NaN) with 2nd entry\n\n smart_h = smart_10s.resample('H').mean() # Down sample smart\n weather_h = weather_10min.resample('H').mean() # Down sample weather\n\n # Combine gas, smart, weather\n df_hour = pd.merge(smart_h, gas_h, left_index=True, right_index=True)\n df_hour = pd.merge(df_hour, weather_h, left_index=True, right_index=True)\n\n \"\"\"\n Create smartmeter dataframe with a 10s sample rate\n \"\"\"\n gas_10s = gas_h.resample('10s').ffill() # Up sample gas to 10s\n # Calculate gasPower column, is this rhe right way? Or should we ffill it?\n # Currently this code makes it so there is one gasPower value per hour, we could ffill this also?\n gas_10s['gasPower'] = gas_10s['gasMeter'].diff()\n gas_10s['gasPower'][0] = gas_10s['gasPower'][1] # Replace 1st entry (NaN) with 2nd entry\n\n weather_10s = weather_10min.resample('10s').ffill() # forward fill because the raw data is the 10 minute mean\n\n # Combine gas, smart, weather\n df_10s = pd.merge(smart_10s, gas_10s, left_index=True, right_index=True)\n df_10s = pd.merge(df_10s, weather_10s, left_index=True, right_index=True)\n\n \"\"\"\n Do NaN analysis on the 10s and hour sample rate dataframes\n \"\"\"\n print('Length of combined df_10s: %s' % len(df_10s))\n print('df_nan_fig_10s')\n df_nan_fig_10s = plot_nans(df_10s, dwelling_id+' 10s sample rate')\n print('df_nan_table_10s')\n df_nan_table_10s = df_nan_checker(df_10s, 0)\n\n print('Length of combined df_hour: %s' % len(df_hour))\n print('df_nan_fig_hour')\n df_nan_fig_hour = plot_nans(df_hour, dwelling_id+' 1 hour sample rate')\n print('df_nan_table_hour')\n df_nan_table_hour = df_nan_checker(df_hour, 0)\n\n return df_10s, df_hour, df_nan_table_10s, df_nan_table_hour, df_nan_fig_hour, df_nan_fig_10s",
"def interpolate_missing(y):\n if y.isna().any():\n y = y.interpolate(method='linear', limit_direction='both')\n return y",
"def test_interpolate_values_1_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), 235.0),\n ]\n forcing_processor.interpolate_values('air_temperature', 1, 1)\n expected = (datetime.datetime(2011, 9, 25, 10, 0, 0), 225.0)\n assert forcing_processor.data['air_temperature'][1] == expected",
"def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data",
"def interpolate_timeseries(self, x, t, **kw):\n v, t_v = self.timeseries(x, rmnans=True)\n kw.update(dict(bounds_error=False))\n interpolant = sp.interpolate.interp1d(t_v, v, **kw)\n return interpolant(t)",
"def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data",
"def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numerica for so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df",
"def fillna(ts: TimeSeries, fill: float = 0) -> TimeSeries:\n\n return TimeSeries.from_times_and_values(ts.time_index(), ts.pd_dataframe().fillna(value=fill))",
"def test_interpolate_values_2_hour_gap(self, forcing_processor):\n forcing_processor.data = {}\n forcing_processor.data['air_temperature'] = [\n (datetime.datetime(2011, 9, 25, 9, 0, 0), 215.0),\n (datetime.datetime(2011, 9, 25, 10, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 11, 0, 0), None),\n (datetime.datetime(2011, 9, 25, 12, 0, 0), 230.0),\n ]\n forcing_processor.interpolate_values('air_temperature', 1, 2)\n expected = (datetime.datetime(2011, 9, 25, 10, 0, 0), 220.0)\n assert forcing_processor.data['air_temperature'][1] == expected\n expected = (datetime.datetime(2011, 9, 25, 11, 0, 0), 225.0)\n assert forcing_processor.data['air_temperature'][2] == expected",
"def interpolate_series(t,y,start,stop):\n \n # daily timestamsp\n numdays = (stop-start).days\n D = [start + datetime.timedelta(days=x) for x in range(0,numdays)]\n T = [time.mktime(date.timetuple()) for date in D]\n \n # interpolated variable\n Y = np.interp(T, t, y)\n \n return (D, Y)",
"def check_and_interpolate_nans(df):\n nan_count = df.isna().sum().sum()\n if nan_count > 0:\n df.interpolate(method='linear', inplace=True)\n return df",
"def interpolate(self, **kwargs): # noqa: PR01\n return DataFrameDefault.register(pandas.DataFrame.interpolate)(self, **kwargs)",
"def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather",
"def interpolate(self, new_times):\n\n # check if the times are to be grabbed from a ping_data object\n if isinstance(new_times, ping_data):\n new_times = new_times.ping_time\n\n # and interpolate\n self.data[:] = np.interp(self.ping_time, new_times,\n self.data, left=np.nan, right=np.nan)\n self.ping_time = new_times.copy()",
"def clean_station_data(station_df):\n # TODO implement data preparation here\n # Fix the datetime field\n\n # Cast to numeric fields where necessary\n\n # Interpolate missing data",
"def fill_nan(x):\n (n_rows, wdw) = x.shape\n new_x = np.zeros((n_rows,wdw)); new_x[:] = np.nan\n for i in range(n_rows):\n indMissing = np.where(np.isnan(x[i,:]))[0]\n l = len(x[i,indMissing]) #number of MVs\n if l < 4*wdw/5: #20% available values otherwise discarded\n new_x[i,:] = x[i,:]\n if l > 0 and indMissing[0] == 0: #missing value at index 0 \n c = 0\n while c + 1 < len(indMissing) and indMissing[c+1] == indMissing[c] + 1:\n c += 1\n new_x[i,:c+1] = x[i,c+1] #first nans replaced by first non nan value\n indMissing = np.where(np.isnan(new_x[i,:]))[0]\n l = len(new_x[i,indMissing])\n if l > 0 and indMissing[0] > 0:\n new_x[i,:] = interpolate1d(new_x[i,:]) #interpolate intermediate nans\n ind = np.where(~np.isnan(new_x).all(axis=1))[0]\n new_x = new_x[ind] #remove NaNs \n \n return new_x, ind",
"def fill_missing_values(\n series: TimeSeries, fill: Union[str, float] = \"auto\", **interpolate_kwargs\n) -> TimeSeries:\n raise_if_not(\n isinstance(fill, str) or isinstance(fill, float),\n \"`fill` should either be a string or a float\",\n logger,\n )\n raise_if(\n isinstance(fill, str) and fill != \"auto\",\n \"invalid string for `fill`: can only be set to 'auto'\",\n logger,\n )\n\n if fill == \"auto\":\n return _auto_fill(series, **interpolate_kwargs)\n return _const_fill(series, fill)",
"def check_nan(wseries: pd.Series) -> pd.Series:\n\n if len(wseries[pd.Series([\n (type(val) == str or isnan(val)) for val in wseries\n ], index=wseries.index)]) == 0:\n return wseries # nothing to change\n\n # ensure that all are either float or nan\n def _float_or_nan(ent):\n \"\"\"\n Force values to be either a float or nan first\n \"\"\"\n try:\n return float(ent)\n except ValueError:\n return float('nan')\n\n wseries = pd.Series(\n [_float_or_nan(val) for val in wseries], index=wseries.index,\n name=wseries.name\n )\n\n # continue with interpolation or extrapolation if needed\n inds = where(\n pd.Series([\n (isinstance(val, str) or isnan(val)) for val in wseries\n ], index=wseries.index)\n )[0] # locate the position of the problematic readings\n for ind in inds:\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-1],\n wseries.index[ind+1],\n wseries[ind-1], wseries[ind+1]\n )\n if isnan(wseries[ind]): # interpolation does not work\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError: # extrapolation\n try:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind-2],\n wseries.index[ind-1],\n wseries[ind-2], wseries[ind-1]\n )\n except IndexError:\n wseries[ind] = interpolate_with_s(\n wseries.index[ind], wseries.index[ind+2],\n wseries.index[ind+1],\n wseries[ind+2], wseries[ind+1]\n )\n return wseries\n\n return wseries",
"def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')",
"def test_parse_weather_two_missing_time(self):\n data = copy.deepcopy(self.weather_two)\n\n # Remove a time entry.\n del data['data'][0]['time']\n\n actual = timeseries.parse_weather(data)\n\n # We'll have a NaN in the Index.\n self.assertTrue(actual.index.isna().any())",
"def interpolate_none(self):\n\n # Reset processed data\n self.u_processed_mps = np.copy(self.u_mps)\n self.v_processed_mps = np.copy(self.v_mps)\n self.u_processed_mps[self.valid_data[0, :] == False] = np.nan\n self.v_processed_mps[self.valid_data[0, :] == False] = np.nan"
] | [
"0.7080858",
"0.68412477",
"0.6801546",
"0.67576146",
"0.6569322",
"0.6546012",
"0.6509167",
"0.6458176",
"0.6420076",
"0.6409065",
"0.6344805",
"0.6284957",
"0.62729025",
"0.6232209",
"0.6207936",
"0.6158501",
"0.61142576",
"0.6104147",
"0.606371",
"0.5919506",
"0.59161085",
"0.5889237",
"0.58862394",
"0.5869328",
"0.58683664",
"0.58384943",
"0.58332586",
"0.5822401",
"0.58221453",
"0.58218294"
] | 0.8054154 | 0 |
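A small sketch of add_missing_weather_data above filling a one-hour gap for a single site; it assumes pandas and itertools are imported at module level as the function requires, and the values are made up.

import pandas as pd

weather = pd.DataFrame({
    'site_id': [0, 0],
    'timestamp': pd.to_datetime(['2016-01-01 00:00', '2016-01-01 02:00']),
    'air_temperature': [10.0, 12.0],
})
filled = add_missing_weather_data(weather)
# filled gains a 01:00 row whose air_temperature is interpolated to 11.0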
Join the meter data, weather data and building metadata into one df. data = dict of df's (keys are 'building_metadata', 'weather_train', 'weather_test', 'train', 'test'); dataset_name = 'train' or 'test'. Returns a merged df which includes building_metadata, weather_train (or weather_test) and train (or test) | def join_input_data_and_multi_index(data, dataset_name):
    meter_df = data[dataset_name]
    building_df = data['building_metadata']
    weather_df = data['weather_' + dataset_name]
    # join meter data with the building metadata, then with the weather data
    building_n_meter = meter_df.merge(building_df, on='building_id', how='left')
    joined_data = building_n_meter.merge(weather_df, on=['site_id', 'timestamp'], how='left')
    # Add time related columns
    joined_data['hour'] = joined_data['timestamp'].dt.hour
    joined_data['weekday'] = joined_data['timestamp'].dt.dayofweek
    joined_data['week_number'] = joined_data['timestamp'].dt.week
    joined_data['month'] = joined_data['timestamp'].dt.month
    # dt.dayofweek: Monday=0 ... Sunday=6, so the weekend is days 5 and 6
    joined_data['is_weekend'] = joined_data['weekday'].apply(lambda x: 1 if x in [5, 6] else 0)
    # multi index on building id and timestamp
    joined_data = joined_data.set_index(['building_id', 'timestamp']).sort_index()
return joined_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_data(self):\n # take care of samples\n patients = self.samples.iloc[:,1].tolist()\n samples = self.samples.iloc[:,0].tolist()\n self.samples = pd.DataFrame(patients,index = samples,columns = ['patient']) # indexed by sample\n #\n # take care of expression data\n cols = self.expression.SYMBOL.tolist() # set new column names to transposed expression_data \n \n new_exp = self.expression.T.ix[1:,:] # transpose\n new_exp.columns = cols\n self.expression = new_exp # add columns\n self.data = pd.merge(self.expression,self.samples,left_index = True,right_index=True) # merged data sets\n #pd.merge(df1,df2,how = 'left',left_index=True,right_index=True) # do a left join",
"def load_data(data_links_list=(\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/raw_data.csv',\n 'https://raw.githubusercontent.com/JanetMatsen/bacteriopop/master'\n '/raw_data/sample_meta_info.tsv')):\n\n # Reading data sets from the links provided.\n df1 = pd.read_csv(data_links_list[0],\n error_bad_lines=False)\n df2 = pd.read_csv(data_links_list[1],\n sep='\\t')\n df2 = df2.set_index(df2['project'])\n # fill the Nas id df1 as \". Makes the groupbys behave better.\n df1.fillna('', inplace=True)\n # repleace 'genus' = 'other' with an empty string to be consistent.\n df1.replace(to_replace='other', value='', inplace=True)\n # Removing duplicate columns.\n del df2['project']\n del df2['ID']\n df1 = df1.set_index(df1['project'])\n # Removing duplicate column.\n del df1['project']\n # Joining the two datasets.\n df = df1.join(df2)\n # Uniformity in non-capitalization of column names.\n df.rename(columns={'Kingdom': 'kingdom', 'Phylum': 'phylum',\n 'Class': 'class', 'Order': 'order',\n 'Family': 'family', 'Genus': 'genus',\n 'Length': 'length'}, inplace=True)\n df.index.names = ['sampleID']\n # Rearranging columns so that abundance is the last column.\n df = df[['kingdom',\t'phylum', 'class', 'order',\n 'family', 'genus', 'length', 'oxygen',\n 'replicate', 'week', 'abundance']]\n assert isinstance(df, pd.DataFrame)\n return df",
"def combine(new_data, raw_data):\n return pd.merge(new_data, raw_data, on=[\"location\", \"date\"], how=\"outer\")",
"def get_datasets(business_data_file, enter_data_file, politics_data_file, sport_data_file, tech_data_file):\n # Load data from files\n business_examples = list(open(business_data_file, \"r\").readlines())\n business_examples = [s.strip() for s in business_examples]\n enter_examples = list(open(enter_data_file, \"r\").readlines())\n enter_examples = [s.strip() for s in enter_examples]\n politics_examples = list(open(politics_data_file, \"r\").readlines())\n politics_examples = [s.strip() for s in politics_examples]\n sport_examples = list(open(sport_data_file, \"r\").readlines())\n sport_examples = [s.strip() for s in sport_examples]\n tech_examples = list(open(tech_data_file, \"r\").readlines())\n tech_examples = [s.strip() for s in tech_examples]\n\n datasets = dict()\n datasets['data'] = business_examples + enter_examples + politics_examples + sport_examples + tech_examples\n target = [0 for x in business_examples] + [1 for x in enter_examples] + [2 for x in politics_examples] + [3 for x in sport_examples] + [4 for x in tech_examples]\n datasets['target'] = target\n datasets['target_names'] = ['business_examples', 'enter_examples', 'politics_examples', 'sport_examples', 'tech_examples']\n return datasets",
"def merge_data(agg_cases, lk_info, geolocation_data):\n merged_df = pd.merge(agg_cases, lk_info, left_on='IdLandkreis', right_on = 'Key')\n merged_df[\"RelativFall\"] = merged_df[\"AnzahlFall\"] / merged_df[\"Bev Insgesamt\"]\n merged_df[\"RelativTodesfall\"] = merged_df[\"AnzahlTodesfall\"] / merged_df[\"Bev Insgesamt\"]\n merged_df = pd.merge(merged_df, geolocation_data, left_on=\"Key\", right_on=\"cca_2\")\n return merged_df",
"def merge_data():\n\n\tconfig = Config()\n\tfilename_train, filename_test = \"../data/train.csv\", \"../data/test.csv\" \n\n # create datasets\n\ttrain, test = config.load_data(filename_train, filename_test, print_EDA=False)\n\n # 1. datetime features\n\t# diff between weekday and day?\n\t#weekday - Return the day of the week as an integer, where Monday is 0 and Sunday is 6.\n\t#day - Between 1 and the number of days in the given month of the given year.\n\ttrain['pickup_hour'] = train.pickup_datetime.dt.hour.astype('uint8')\n\ttrain['pickup_day'] = train.pickup_datetime.dt.day.astype('uint8')\n\ttrain['pickup_weekday'] = train.pickup_datetime.dt.weekday.astype('uint8')\n\ttrain['pickup_minute'] = train.pickup_datetime.dt.minute.astype('uint8')\n\ttrain['pickup_month'] = train.pickup_datetime.dt.month.astype('uint8')\n\ttrain['pickup_hour_weekofyear'] = train['pickup_datetime'].dt.weekofyear\n\ttrain['pickup_weekday_hour'] = train['pickup_weekday']*24 + train['pickup_hour']\n\n\ttest['pickup_hour'] = test.pickup_datetime.dt.hour.astype('uint8')\n\ttest['pickup_day'] = test.pickup_datetime.dt.day.astype('uint8')\n\ttest['pickup_weekday'] = test.pickup_datetime.dt.weekday.astype('uint8')\n\ttest['pickup_minute'] = test.pickup_datetime.dt.minute.astype('uint8')\n\ttest['pickup_month'] = test.pickup_datetime.dt.month.astype('uint8')\n\ttest['pickup_hour_weekofyear'] = test['pickup_datetime'].dt.weekofyear\n\ttest['pickup_weekday_hour'] = test['pickup_weekday']*24 + test['pickup_hour']\n\n\t# 2. Location features\n\tdef haversine(lon1, lat1, lon2, lat2):\n\t lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])\n\t dlon = lon2 - lon1\n\t dlat = lat2 - lat1\n\t a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon/2.0)**2\n\t c = 2 * np.arcsin(np.sqrt(a))\n\t km = 6367 * c # AVG_EARTH_RADIUS=6367\n\t miles = km * 0.621371\n\t return miles\n\n\t# def dummy_manhattan_distance(lat1, lng1, lat2, lng2):\n\t# a = haversine_array(lat1, lng1, lat1, lng2)\n\t# b = haversine_array(lat1, lng1, lat2, lng1)\n\t# return a + b\n\n\t# def bearing_array(lat1, lng1, lat2, lng2):\n\t# AVG_EARTH_RADIUS = 6371 # in km\n\t# lng_delta_rad = np.radians(lng2 - lng1)\n\t# lat1, lng1, lat2, lng2 = map(np.radians, (lat1, lng1, lat2, lng2))\n\t# y = np.sin(lng_delta_rad) * np.cos(lat2)\n\t# x = np.cos(lat1) * np.sin(lat2) - np.sin(lat1) * np.cos(lat2) * np.cos(lng_delta_rad)\n\t# return np.degrees(np.arctan2(y, x))\n\n\ttrain['distance'] = haversine(train.pickup_longitude, train.pickup_latitude,\n\t train.dropoff_longitude, train.dropoff_latitude)\n\ttest['distance'] = haversine(test.pickup_longitude, test.pickup_latitude,\n\t test.dropoff_longitude, test.dropoff_latitude)\n\n\n\t# 3. 
Use outsource data\n\tweatherdata_filename = \"../data/outsource_data/weather_data_nyc_centralpark_2016.csv\"\n\tfastestroute_data_train = \"../data/outsource_data/fastest_train.csv\"\n\tfastestroute_data_test = \"../data/outsource_data/fastest_routes_test.csv\"\n\n\n\twd = pd.read_csv(weatherdata_filename, header=0)\n\twd['date'] = pd.to_datetime(wd.date, format=\"%d-%m-%Y\")\n\twd['pickup_day'] = wd['date'].dt.day\n\twd['snow fall'] = wd['snow fall'].replace('T', 0.05).astype(np.float32) \n\twd['precipitation'] = wd['precipitation'].replace('T', 0.05).astype(np.float32) \n\twd['snow depth'] = wd['snow depth'].replace('T', 0.05).astype(np.float32) \n\n\t# Merge training data with weather data on pickup_day\n\tprint(\"Merging training data with weather data ....\")\n\twd_train = pd.merge(train, wd, on='pickup_day')\n\twd_train = wd_train.drop(['date','maximum temperature','minimum temperature'],axis=1)\n\tgc.collect()\n\n\t# Merge wd_train with fastestroute_data\n\tfastest = pd.read_csv(fastestroute_data_train, header=0)\n\tprint(\"Merging Location data with weather and training data ....\")\n\twd_train_fastest = pd.merge(wd_train, fastest, on='id', how='outer')\n\n\tgc.collect()\n\n\n\tprint(\"===================== CHECK TRAINING DATA =====================\")\n\n\tprint(wd_train_fastest.head(2))\n\tprint(\"Semi-final training data shape is: {}\".format(wd_train_fastest.shape))\n\tprint(\"Training data columns: {}\".format(wd_train_fastest.columns))\n\n\n\t# Use the same outsource data with test set\n\t# merge outsource data with test data as well\n\tft_test_cols = [ 'id', 'starting_street','end_street','total_distance',\t'total_travel_time',\n\t\t\t\t\t'number_of_steps','street_for_each_step','distance_per_step','travel_time_per_step',\n\t 'step_maneuvers','step_direction',\t'step_location_list']\n\tfastest_test = pd.read_csv(fastestroute_data_test, names=ft_test_cols, header=0)\n\n\tprint(\"Merging test data with Location data ....\")\n\ttest = pd.merge(test, fastest_test, on='id', how='outer')\n\ttest = test.drop(['step_location_list','step_direction','step_maneuvers','travel_time_per_step','distance_per_step','street_for_each_step','number_of_steps','starting_street',\n\t 'end_street'], axis=1)\n\tprint(\"Merging test data with weather data ....\")\n\ttest = pd.merge(test, wd, on='pickup_day')\n\n\tprint(\"===================== CHECK TEST DATA =====================\")\n\n\tprint(test.head(2))\n\tprint(\"Semi-final test data shape is: {}\".format(test.shape))\n\tprint(\"Test data columns: {}\".format(test.columns))\n\n\n\n\t# 4. Do more data munging\n\tmask = ((wd_train_fastest.trip_duration > 60) & (wd_train_fastest.distance < 0.05))\n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = (wd_train_fastest.trip_duration < 60) \n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = wd_train_fastest.trip_duration > 79200\n\twd_train_fastest = wd_train_fastest[~mask]\n\tmask = wd_train_fastest.distance/(wd_train_fastest.trip_duration/3600) > 60\n\twd_train_fastest = wd_train_fastest[~mask]\n\twd_train_fastest.trip_duration = wd_train_fastest.trip_duration.astype(np.uint16)\n\twd_train_fastest = wd_train_fastest[wd_train_fastest.passenger_count > 0]\n\n\t# 5. 
Do some data maskig based on location to create jfk and lgo features\n\tjfk_lon = -73.778889\n\tjfk_lat = 40.639722\n\tlga_lon = -73.872611\n\tlga_lat = 40.77725\n\n\twd_train_fastest['jfk_pickup_dist'] = wd_train_fastest.apply(lambda row: haversine(jfk_lon, jfk_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)\n\twd_train_fastest['lga_pickup_dist'] = wd_train_fastest.apply(lambda row: haversine(lga_lon, lga_lat, row['pickup_longitude'],row['pickup_latitude']), axis=1)\n\twd_train_fastest['jfk_dropoff_dist'] = wd_train_fastest.apply(lambda row: haversine(jfk_lon, jfk_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)\n\twd_train_fastest['lga_dropoff_dist'] = wd_train_fastest.apply(lambda row: haversine(lga_lon, lga_lat, row['dropoff_longitude'],row['dropoff_latitude']), axis=1)\n\n\twd_train_fastest['jfk'] = ((wd_train_fastest['jfk_pickup_dist'] < 2) | (wd_train_fastest['jfk_dropoff_dist'] < 2))\n\twd_train_fastest['lga'] = ((wd_train_fastest['lga_pickup_dist'] < 2) | (wd_train_fastest['lga_dropoff_dist'] < 2))\n\twd_train_fastest = wd_train_fastest.drop(['jfk_pickup_dist','lga_pickup_dist','jfk_dropoff_dist','lga_dropoff_dist'],axis=1)\n\twd_train_fastest['workday'] = ((wd_train_fastest['pickup_hour'] > 8) & (wd_train_fastest['pickup_hour'] < 18))\n\n\n\tprint(\"===================== CHECK TRAINING DATA AGAIN =====================\")\n\n\tprint(wd_train_fastest.head(2))\n\tprint(\"Final training data shape is: {}\".format(wd_train_fastest.shape))\n\tprint(\"Training data columns: {}\".format(wd_train_fastest.columns))\n\n\n\treturn wd_train_fastest, test",
"def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist",
"def make_dataframe(self):\n logging.info('*** Creating the dataframes from the source files ' )\n \n for k in self.datasets_keys:\n #for k in ['igra2' , 'ncar']:\n \n logging.info('*** Creating the dataframe for the dataset: %s ' , k ) \n \n p_levels = self.data[k]['df']['observations_table']['z_coordinate'][:]\n logging.debug(' Loaded the z_coordinate')\n \n z_type = self.data[k]['df']['observations_table']['z_coordinate_type'][:]\n logging.debug(' Loaded the z_coordinate_type')\n \n obs_variable = self.data[k]['df']['observations_table']['observed_variable'][:]\n logging.debug(' Loaded the observed_variable')\n \n obs_values = self.data[k]['df']['observations_table']['observation_value'][:]\n logging.debug(' Loaded the observation_value')\n \n observation_id = self.data[k]['df']['observations_table']['observation_id'][:]\n logging.debug(' Loaded the observation_id')\n \n units = self.data[k]['df']['observations_table']['units'][:].astype(int)\n logging.debug(' Loaded the units') \n \n report_id = self.data[k]['df']['observations_table']['report_id'][:] \n logging.debug(' Loaded the report_id')\n \n date_time = self.data[k]['df']['observations_table']['date_time'][:]\n logging.debug(' Loaded the date_time (deltas)')\n \n lat , lon = self.data[k]['df']['observations_table']['latitude'][:] , self.data[k]['df']['observations_table']['longitude'][:]\n logging.debug(' Loaded the lat,lon ')\n \n \n self.obs_table_columns = list(self.data[k]['df']['observations_table'].keys() )\n \n self.data[k]['df'].close()\n \n \"\"\" Creating a dataframe \"\"\"\n columns = ['date_time', 'z_coordinate' , 'z_coordinate_type', 'observed_variable' , 'observation_value' , 'report_id' , 'observation_id' , 'latitude' , 'longitude', 'units']\n logging.info(' Loaded the data, creating dataframe ')\n \n df = pd.DataFrame( list(zip( date_time, p_levels, z_type, obs_variable , obs_values, report_id, observation_id , lat , lon, units ) ) , columns = columns ) \n \n \n \"\"\" Storing the dataframe \"\"\" ### try using xarrays ??? \n logging.debug('Storing the DF ' ) \n self.data[k]['dataframe'] = df\n \n logging.debug(' PD dataframe created !!! ')",
"def merge_weather_trails(df_weather, df_hike):\n df_trail_year = pd.merge(\n df_hike, df_weather, how='left', left_on=[\n 'closet_station', 'last_year'], right_on=[\n 'name', 'DATE'])\n df_all_clean = df_trail_year.drop(['DATE', 'name'], axis=1)\n return df_all_clean",
"def clean_data(raw_data, names=const.NAMES, meter_map=const.METER_MAP):\n\n cleaned_data = {}\n local_names = names.copy()\n if 'building_metadata' in local_names:\n local_names.remove('building_metadata')\n\n for name in local_names:\n print(f'Cleaning {name} dataset')\n df = raw_data[name]\n df.timestamp = pd.to_datetime(df.timestamp)\n if name.startswith('weather'):\n df = add_missing_weather_data(df)\n elif name in ['train', 'test']:\n df['meter_type'] = df['meter'].map(meter_map)\n cleaned_data[name] = df\n\n cleaned_data['building_metadata'] = raw_data['building_metadata']\n\n return cleaned_data",
"def build_dataset_join_dfs(pset_dict, pset_name, primary_dfs={}):\n cell_df = primary_dfs['cell'] if 'cell' in primary_dfs else None\n tissue_df = primary_dfs['tissue'] if 'tissue' in primary_dfs else None\n compound_df = primary_dfs['drug'] if 'drug' in primary_dfs else None\n\n join_dfs = {}\n join_dfs['dataset_cell'] = build_dataset_cell_df(\n pset_dict, pset_name, cell_df)\n join_dfs['dataset_tissue'] = build_dataset_tissue_df(\n pset_dict, pset_name, tissue_df)\n join_dfs['dataset_compound'] = build_dataset_compound_df(\n pset_dict, pset_name, compound_df)\n return join_dfs",
"def join():\n dataset_df = pd.read_excel(\"dataset.xlsx\")\n statistics_df = pd.read_excel(\"statistics.xlsx\")\n\n merge_df = pd.merge(dataset_df, statistics_df, on=['patient_identifier'])\n\n writer = pd.ExcelWriter('final_dataset.xlsx', engine='xlsxwriter')\n merge_df.to_excel(writer, sheet_name='Sheet1')\n writer.save()",
"def train_test_data_df(train_data_file, test_data_file):\n dtype_dict = {\n \"age\": np.int32,\n \"education-num\": np.int32,\n \"capital-gain\": np.int32,\n \"capital-loss\": np.int32,\n \"hours-per-week\": np.int32\n }\n cols = [i for i in range(15) if i != 2]\n train_data = pd.read_csv(train_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n train_data = train_data.dropna(axis=0, how=\"any\")\n test_data = pd.read_csv(test_data_file, sep=\", \", header=0, dtype=dtype_dict, na_values=\"?\", usecols=cols)\n test_data = test_data.dropna(axis=0, how=\"any\")\n return train_data, test_data",
"def data_merge(detector_fields):\n print(\"Merging final data...\")\n\n # load files that contain phase and I/O processed data and store as dfs\n phase_data = pd.read_csv(results_folder + 'phases/processed/clean_merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n detection_data = pd.read_csv(results_folder + 'io/io_out.csv', header=0, skipinitialspace=True,\n usecols=detector_fields)\n phase_df = pd.DataFrame(phase_data)\n detection_df = pd.DataFrame(detection_data)\n\n # merge the two files based on their Date and Time fields\n output = pd.merge(phase_df, detection_df, on=['Date', 'Time'])\n\n # store the output with any duplicates dropped and create a final CSV file\n merged_df = output.drop_duplicates()\n merged_df.to_csv(results_folder + 'dataset.csv', sep=',', index=False)\n\n print(\"Data merged!\")\n print(\"Main dataset available: \" + results_folder + 'dataset.csv')\n\n # return location of dataset\n return results_folder + 'dataset.csv'",
"def existing_data(self):\n # Set the directory and file name\n data_summary_dir = op.join('../logs', self.name, 'data_summary')\n file_name = 'Train_Test_Summary_generative.csv'\n\n # Read the csv and obtain the train data list\n df = pd.read_csv(op.join(data_summary_dir, file_name))\n train_data = df['Train Data'].dropna().values.tolist()\n test_data = df['Test Data'].dropna().values.tolist()\n\n train_data_list, test_data_list = [], []\n for single_train in train_data:\n data_name = single_train.split('_')[0]\n if data_name == 'LTRC':\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4]\n else:\n series = single_train.split('_')[3] + '_' + single_train.split('_')[4] + '_' + single_train.split('_')[5]\n full_data_name = single_train.split('_')[0] + '_' + single_train.split('_')[1] + '_' + single_train.split('_')[2] + '_' + series\n train_data_list.append(full_data_name)\n\n for single_test in test_data:\n data_name = single_test.split('_')[0]\n if data_name == 'LTRC':\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4]\n else:\n series = single_test.split('_')[3] + '_' + single_test.split('_')[4] + '_' + single_test.split('_')[5]\n full_data_name = single_test.split('_')[0] + '_' + single_test.split('_')[1] + '_' + single_test.split('_')[2] + '_' + series\n test_data_list.append(full_data_name)\n\n # Obtain the label map and CT list and file names\n label_map_list = glob(op.join(self.save_root_dir, 'source_data_2', '*'))\n ct_list = glob(op.join(self.save_root_dir, 'target_data_2', '*'))\n\n label_map_files = [single_file.split('/')[-1] for single_file in label_map_list]\n ct_files = [single_file.split('/')[-1] for single_file in ct_list]\n label_map_files.sort(), ct_files.sort()\n\n # Initialize empty list\n existing_train_lm, existing_train_ct = [], []\n existing_test_lm, existing_test_ct = [], []\n\n for single_lm, single_ct in zip(label_map_files, ct_files):\n\n ct_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\n lm_data_name = single_lm.split('_')[0] + '_' + single_lm.split('_')[1] + '_' + single_lm.split('_')[2]\n\n assert ct_data_name == lm_data_name, 'Data is not the same.'\n\n data_name = single_ct.split('_')[0]\n if data_name == 'LTRC':\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4]\n else:\n series = single_ct.split('_')[3] + '_' + single_ct.split('_')[4] + '_' + single_ct.split('_')[5]\n full_data_name = single_ct.split('_')[0] + '_' + single_ct.split('_')[1] + '_' + single_ct.split('_')[2]\\\n + '_' + series\n\n if full_data_name in train_data_list:\n existing_train_lm.append(single_lm)\n existing_train_ct.append(single_ct)\n if full_data_name in test_data_list:\n existing_test_lm.append(single_lm)\n existing_test_ct.append(single_ct)\n existing_train_data = [existing_train_lm, existing_train_ct]\n existing_test_data = [existing_test_lm, existing_test_ct]\n return existing_train_data, existing_test_data",
"def setup_merged_df(obs_df):\n obs_df = obs_df.assign(height=obs_df[\"measurement\"], weight=obs_df[\"measurement\"])\n obs_df.loc[obs_df.param == \"WEIGHTKG\", \"height\"] = np.NaN\n obs_df.loc[obs_df.param == \"HEIGHTCM\", \"weight\"] = np.NaN\n heights = obs_df[obs_df.param == \"HEIGHTCM\"]\n weights = obs_df[obs_df.param == \"WEIGHTKG\"]\n merged = heights.merge(\n weights, on=[\"subjid\", \"agedays\", \"ageyears\", \"sex\"], how=\"outer\"\n )\n only_needed_columns = merged.drop(\n columns=[\n \"param_x\",\n \"measurement_x\",\n \"clean_value_x\",\n \"weight_x\",\n \"id_y\",\n \"param_y\",\n \"measurement_y\",\n \"clean_value_y\",\n \"height_y\",\n ]\n )\n clean_column_names = only_needed_columns.rename(\n columns={\n \"clean_cat_x\": \"height_cat\",\n \"include_x\": \"include_height\",\n \"height_x\": \"height\",\n \"clean_cat_y\": \"weight_cat\",\n \"include_y\": \"include_weight\",\n \"weight_y\": \"weight\",\n \"reason_y\": \"reason\",\n \"id_x\": \"id\",\n }\n )\n clean_column_names[\"bmi\"] = clean_column_names[\"weight\"] / (\n (clean_column_names[\"height\"] / 100) ** 2\n )\n clean_column_names[\"rounded_age\"] = np.around(clean_column_names.ageyears)\n clean_column_names[\"include_both\"] = (\n clean_column_names[\"include_height\"] & clean_column_names[\"include_weight\"]\n )\n return clean_column_names",
"def predict_energy_consumption(buildings):\n forecasts = [forecast_for_building(building) for i, building in buildings.iterrows()]\n df = pd.concat(forecasts)\n df.drop(columns=\"id\", inplace=True)\n df = buildings.merge(df, left_on=\"id\", right_on=\"building_id\")\n df[\"meter\"] = 0\n df[\"floor_count\"] = df[\"floorcount\"]\n df[\"air_temperature\"] = df[\"temp\"]\n df[\"relative_humidity\"] = df[\"humidity\"]\n df[\"dew_temperature\"] = df[\"air_temperature\"] - ((100 - df[\"relative_humidity\"]) / 5)\n df[\"precip_depth_1_hr\"] = np.nan\n df[\"timestamp\"] = pd.to_datetime(df[\"date\"])\n df[\"wind_direction\"] = df[\"deg\"]\n df[\"wind_speed\"] = df[\"speed\"]\n\n df.drop(columns=[\"id\", \"name\", \"floorcount\", \"latitude\", \"longitude\", \"user_id\", \"temp\", \"feels_like\", \"temp_min\",\n \"temp_max\", \"pressure\", \"sea_level\", \"grnd_level\", \"humidity\", \"temp_kf\", \"main\", \"description\",\n \"icon\", \"speed\", \"deg\", \"date\"], inplace=True)\n\n df_temp = df.copy(deep=True)\n for i in range(1, 4):\n df_temp[\"meter\"] += 1\n df = pd.concat([df, df_temp])\n del df_temp\n\n cfg = {\n 'circular_timestamp_encoding': False,\n 'log_transform_square_feet': True,\n 'log_transform_area_per_floor': True,\n 'label_square_feet_outlier': True,\n 'label_area_per_floor_outlier': True,\n 'encode_wind_direction': False,\n 'include_feels_like': True,\n 'fill_na_with_zero': False,\n 'add_lag_features': True,\n 'lag_columns': ['air_temperature', 'dew_temperature', 'cloud_coverage'],\n 'lag_windows': [6, 24],\n }\n [df] = build_features(df, cfg=cfg)\n\n df.reset_index(inplace=True, drop=True)\n building_ids = df[\"building_id\"]\n timestamps = df[\"timestamp\"]\n df.drop(columns=[\"timestamp\", \"month\", \"wind_direction\", \"wind_speed\", \"building_id\"], inplace=True)\n\n model_endpoint = \"http://model:5001/predict\"\n data = df.to_json()\n response = requests.get(model_endpoint, json=data).json()\n\n predictions = pd.DataFrame({\"reading\": response[\"prediction\"],\n \"building_id\": building_ids,\n \"meter\": df[\"meter\"],\n \"timestamp\": timestamps,\n \"air_temperature\": df[\"air_temperature\"]})\n return predictions",
"def load_data(conso_train_file='../input/conso_train.csv',\n meteo_train_file='../input/meteo_train.csv',\n meteo_test_file='../input/meteo_prev.csv'):\n # Loading the weather data\n train_meteo = pd.read_csv(meteo_train_file, sep=';')\n test_meteo = pd.read_csv(meteo_test_file, sep=';')\n\n # Marking the type and merge the train/test dataframes\n train_meteo['type'] = 'train'\n test_meteo['type'] = 'test'\n data_meteo = pd.concat([train_meteo, test_meteo])\n # Correct the weather data\n data_meteo = correct_weather_data(data_meteo)\n\n # Load the train consumption\n train_conso = pd.read_csv(conso_train_file, sep=';')\n # Correct the consumption data\n train_conso = correct_conso_data(train_conso)\n\n # Mark the type and merge the consumption/weather dataframes\n train_conso['type'] = 'train'\n data = pd.merge(train_conso, data_meteo,\n on=['date', 'type'], how='outer', sort=True)\n\n # Complete the test dates\n data = add_test_dates(data)\n # Correct the test data from the merging\n data = correct_test_data(data)\n\n return data",
"def group_data():\n\n # Merge on Departure.\n\n # Merge on Arrival.\n\n data = pd.read_csv(path + \"/data/public/public_train.csv\")[[\"DateOfDeparture\", \"Arrival\"]]\n data['DateOfDeparture'] = pd.to_datetime(data['DateOfDeparture'])\n\n arrival = join_cleaned_data().\\\n rename(columns={'Date': 'DateOfDeparture', 'Airport': 'Arrival'}).\\\n set_index(\"DateOfDeparture\")\n\n merged_arrv = pd.merge(data, arrival, on=[\"DateOfDeparture\", \"Arrival\"], how=\"left\")\n\n # Rename and drop columns.\n\n merged_arrv.columns = [c + \"_Arrival\" if c not in [\"DateOfDeparture\",\n \"DateOfArrival\",\n \"Arrival\",\n \"WeeksToDeparture\"]\n else c\n for c in merged_arrv.columns]\n print merged_arrv\n merged_arrv = merged_arrv.drop([\"Arrival\"], axis=1)\n\n # Concatenate the two fields.\n # merged_all = pd.concat([merged_arrv, merged_dept], axis=1)\n\n merged_all = merged_arrv.\\\n convert_objects(convert_numeric=True)\n merged_all.to_csv(path + \"/Submission/temperatures.csv\")",
"def combine_data(self):\n for country in config.COUNTRIES:\n frames = []\n for year in config.years:\n incidence_path = (config.raw_data_path / country / 'complete'\n / (str(year) + '_' + str(year + 1) + '.csv'))\n\n if incidence_path.exists() and incidence_path.is_file():\n df_incidence = pd.read_csv(incidence_path)\n\n wiki_path1 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year) + '.csv')\n wiki_path2 = config.raw_data_path / ('wikipedia_' +\n country) / \\\n 'complete' / (\n str(year + 1) + '.csv')\n\n if wiki_path1.exists() and wiki_path1.is_file():\n df_wiki1 = pd.read_csv(wiki_path1)\n df_wiki1 = df_wiki1.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki1, df_incidence, on='week', how='right')\n\n if wiki_path2.exists() and wiki_path2.is_file():\n df_wiki2 = pd.read_csv(wiki_path2)\n df_wiki2 = df_wiki2.rename(columns={'Week': 'week'})\n df_incidence = pd.merge(\n df_wiki2, df_incidence, on='week', how='right')\n\n for col_name in df_incidence.columns:\n if col_name[-1] == 'x':\n if col_name[:-2] + '_y' in df_incidence.columns:\n df_incidence[col_name[:-2]] = df_incidence[\n col_name].fillna(\n df_incidence[col_name[:-2] + '_y'])\n df_incidence = df_incidence.drop(\n columns=[col_name,\n col_name[:-2] + '_y'])\n\n frames.append(df_incidence)\n\n df_country = pd.concat(frames)\n df_country['date'] = pd.to_datetime(\n df_country.week.add('-0'), format='%Y-%W-%w')\n df_country = df_country.sort_values(by=\"date\")\n\n if 'cases' in df_country.columns:\n df_country.drop(columns=['cases'])\n\n file_path = config.combined_data_path / (country + '.csv')\n\n df_country.to_csv(file_path, index=False)",
"def load_data():\n data_path = os.path.join('qual-o-mat-data', 'data', '2019', 'europa')\n data_keys = [\"answer\", \"comment\", \"opinion\", \"party\", \"statement\"]\n raw_data = dict()\n all_data = dict()\n\n # Create a dictionary of type <string, DataFrame> that contains the data from all JSON files\n for dk in data_keys:\n json_file = os.path.join(data_path, dk + \".json\")\n with open(json_file, \"r\") as fh:\n raw_data[dk] = json.load(fh)\n all_data[dk] = pd.DataFrame(raw_data[dk])\n\n\n # Based on the opinion data, merge all other data frames on their ID fields to get usable names instead of just ID numbers\n merged_df = all_data[\"opinion\"].copy()\n for to_merge in [\"party\", \"statement\", \"comment\", \"answer\"]:\n merged_df = merged_df.merge(all_data[to_merge], how='inner', left_on=[to_merge], right_on=['id'])\n\n #print(mdf.head())\n return merged_df, all_data, raw_data",
"def ana_merge_datas(datas):\n return {\n 'searches':ana_merge_searches(datas),\n 'senzory_map':ana_merge_senzory_map(datas)\n }",
"def load_data():\n train = pd.read_csv(\"../input/train.csv\", dtype={\"Age\": np.float64}, )\n test = pd.read_csv(\"../input/test.csv\", dtype={\"Age\": np.float64}, )\n\n train = train.set_index('PassengerId')\n test = test.set_index('PassengerId')\n\n train = train.apply(preprocess, axis=1)\n test = test.apply(preprocess, axis=1)\n\n x_train = train.drop(['Survived'], axis=1)\n y_train = train['Survived']\n x_test = test\n return {'train': {'x': x_train, 'y': y_train},\n 'test': {'x': x_test},\n 'full_features': pd.concat([x_train, x_test])}",
"def create_metadata(data_dir):\n pool = multiprocessing.Pool()\n\n train_feature_paths = [os.path.join(data_dir, \"train_features_{}.jsonl\".format(i)) for i in range(6)]\n train_records = list(pool.imap(read_metadata_record, raw_feature_iterator(train_feature_paths)))\n train_records = [dict(record, **{\"subset\": \"train\"}) for record in train_records]\n\n test_feature_paths = [os.path.join(data_dir, \"test_features.jsonl\")]\n test_records = list(pool.imap(read_metadata_record, raw_feature_iterator(test_feature_paths)))\n test_records = [dict(record, **{\"subset\": \"test\"}) for record in test_records]\n\n all_metadata_keys = [\"sha256\", \"appeared\", \"subset\", \"label\", \"avclass\"]\n ordered_metadata_keys = [k for k in all_metadata_keys if k in train_records[0].keys()]\n metadf = pd.DataFrame(train_records + test_records)[ordered_metadata_keys]\n metadf.to_csv(os.path.join(data_dir, \"metadata.csv\"))\n return metadf",
"def data_and_metadata(self):\n data = self.data\n if self._metadata is not None and not self._metadata.empty:\n data = [self._metadata, data]\n data = pd.concat(data, axis=1)\n return data",
"def merge_weather(weather):\n\n weather1 = weather[weather[\"Station\"] == 1]\n weather2 = weather[weather[\"Station\"] == 2]\n\n rows, rows1, rows2 = (weather.shape[0],\n weather1.shape[0],\n weather2.shape[0])\n\n weather = pd.merge(weather1, weather2, on=\"Date\")\n weather.drop([\"Station_x\", \"Station_y\"], axis=1, inplace=True)\n\n newrows = weather.shape[0]\n # sanity check the rows\n assert(rows1 + rows2 == rows)\n assert(rows1 == newrows)\n\n return weather",
"def read_training(index_columns=None, both=False, weather=False):\n if weather:\n raw_X_train = pd.read_csv('data\\\\train_X.csv', parse_dates=['date'])\n raw_weather = pd.read_csv('data\\\\weather_data.csv', parse_dates=['date'])\n\n raw_X_train = ffill_nans(raw_X_train)\n raw_X_train = raw_X_train.merge(raw_weather, how='left', on=['date','hour'])\n raw_X_train = raw_X_train.set_index(index_columns)\n\n else:\n raw_X_train = pd.read_csv(\n 'data\\\\train_X.csv',\n parse_dates=['date'],\n index_col=index_columns)\n if both:\n raw_y_train = pd.read_csv(\n 'data\\\\train_y.csv',\n parse_dates=['date'],\n index_col=index_columns)\n\n return raw_X_train, raw_y_train\n \n return raw_X_train",
"def ConcatDF(train_set, test_set):\n return pd.concat([train_set, test_set], sort=True).reset_index(drop=True)",
"def create_dataframe_for_training(data):\n feature_column_name = 'X'\n #data_cp = data[['label']].copy()\n for i, row in tqdm(data.iterrows(), total=len(data)):\n all_features = f'{row.claimant} {row.claim} {row.article_content}'\n data.loc[i, feature_column_name] = all_features\n\n return data[feature_column_name]",
"def load_data():\n \n data = datasets.load_iris()\n df = pd.DataFrame(data.data, columns = data.feature_names)\n df2 = pd.DataFrame(data.target, columns = [\"target\"])\n\n return df.join(df2)"
] | [
"0.59869367",
"0.57781816",
"0.5758608",
"0.563408",
"0.56181586",
"0.55629724",
"0.5561206",
"0.5549236",
"0.5505305",
"0.55047166",
"0.54905367",
"0.54782295",
"0.54737127",
"0.5469189",
"0.544329",
"0.54359186",
"0.54143375",
"0.5409169",
"0.5407379",
"0.53848493",
"0.5383751",
"0.537155",
"0.53513575",
"0.5328619",
"0.53272426",
"0.53148323",
"0.5314481",
"0.5307224",
"0.5300109",
"0.5293462"
] | 0.69934803 | 0 |
Split the joined data into a dict with a df for each meter type | def split_on_meter_type(joined_data, meter_types):
joined_data_dict = {meter_type: joined_data[joined_data['meter_type'] == meter_type]
for meter_type in meter_types}
return joined_data_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_meter_data_for_time_slice(apt_no, start_time, end_time):\n if apt_no in ['102A', 102]:\n apt_no = '102A'\n\n logger.debug(\"sMap: Getting meter data for %s between %s and %s\", apt_no, start_time, end_time)\n\n query = (\"select data in ('\" + str(start_time) + \"','\" + str(end_time) + \"') \"\n \"limit 200000 \"\n \"where Metadata/LoadLocation/FlatNumber ='\" + str(apt_no) + \"' and \"\n \"Metadata/Extra/PhysicalParameter='Power'\")\n\n r = requests.post(url, data=query)\n # logger.debug (\"%s\",r)\n payload = r.json()\n # logger.debug(\"Payload:%s\", payload)\n\n if apt_no in ['102A', 102]:\n apt_no = 102\n meters = retrieve_meter_info(apt_no)\n logger.debug(\"Meters: %s\", meters)\n\n streams = []\n meter_type = []\n l_meters = range(0, len(meters))\n for i in l_meters:\n uuid = payload[i]['uuid']\n\n # Get meter type based on uuid\n for meter in meters:\n if meter['uuid'] == uuid:\n m_type = meter['type']\n # logger.debug (uuid, m_type)\n\n meter_type.append(m_type)\n streams.append(np.array(payload[i]['Readings']))\n # logger.debug(\"Streams: %s\", streams)\n\n if len(streams[0]) > 0:\n\n df = [pd.DataFrame({'time': readings[:, 0] / 1000, 'power': readings[:, 1],\n 'type': [meter_type[i]] * len(readings)},\n columns=['time', 'power', 'type']) for i, readings in enumerate(streams)]\n else:\n df = []\n\n return df",
"def generate_record(self, data_dictionaries, group_by):\n result = {}\n\n for one_measurement in data_dictionaries:\n time = one_measurement['datetime']\n\n if isinstance(time, str):\n if self.timezone:\n time = arrow.get(time).shift(hours=6) # TODO: fix utc conversion\n else:\n time = arrow.get(time)\n\n record = Record(self.name, self.lat, self.lon, self.height, time)\n\n del one_measurement['datetime']\n\n one_measurement = {k: float(v) for k, v in one_measurement.items()}\n\n record.merge(one_measurement)\n\n key = group_by(time)\n \n if key == '2016-04-01_00':\n break\n\n record_string = record.little_r_report()\n\n try:\n result[key].append(record_string)\n except KeyError:\n result[key] = [record_string]\n\n return result",
"def extract_data(self):\n values = {}\n for injkey in self.data_sets.keys():\n values[injkey] = {}\n alldata = self.data_sets[injkey]\n paramkeys = alldata['params'].keys()\n for datakey in alldata.keys():\n if not datakey == 'params':\n values[injkey][datakey] = {}\n values[injkey][datakey]['metric_val'] = {}\n values[injkey][datakey]['metric_val']['vals'] = []\n for paramkey in paramkeys:\n values[injkey][datakey][paramkey] = {}\n values[injkey][datakey][paramkey]['vals'] = []\n trials = alldata[datakey]\n for trial_num in trials.keys():\n trial = trials[trial_num]\n values[injkey][datakey]['metric_val']['vals'] \\\n .append(trial['metric_val'])\n values[injkey][datakey]['metric_val']['type'] \\\n = trial['metric']\n values[injkey][datakey]['metric_val']['units'] \\\n = 'dimensionless'\n param_vals = trial['params']\n for param_name in param_vals.keys():\n val, units = self.parse_pint_string(\n pint_string=param_vals[param_name]\n )\n values[injkey][datakey][param_name]['vals'] \\\n .append(float(val))\n values[injkey][datakey][param_name]['units'] \\\n = units\n self.values = values",
"def prepare_data(groups):\n all_dicts = []\n for idx, group in groups:\n res_dict = {'organism': group.organism.iloc[0]}\n for g_idx, row in group.iterrows():\n if pd.notna(row.label):\n res_dict[row.cmp_name] = {'label': row.label, 'mic': row.MIC}\n else:\n res_dict[row.cmp_name] = {'label': '', 'mic': row.MIC}\n all_dicts.append(res_dict)\n return all_dicts",
"def _make_meta(self):\n available_meas_times = list()\n available_intervals = list()\n drill_by = list()\n related = list()\n last_data_set_instance = dict()\n\n if self._data['report_save_historical_instances_ind'] == 'Y':\n # last measurement instance\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\n LIMIT 0, 1\"\"\",(self._id, self._segment_value_id))\n if res:\n last_data_set_instance = self._db.record[0]\n last_data_set_instance['measurement_time'] = self._formatter.format_date(last_data_set_instance['measurement_time'])\n\n # available measurement instances\n res = self._db.Query(\"\"\"SELECT *\n FROM report_data_set_instance\n WHERE\n `element_id`=%s\n AND `segment_value_id` = %s\n ORDER BY measurement_time DESC\"\"\",(self._id, self._segment_value_id))\n if res:\n for data_set_instance in self._db.record:\n data_set_instance['measurement_time'] = self._formatter.format_date(data_set_instance['measurement_time'])\n available_meas_times.append(data_set_instance)\n \n\n # get drill by. not for this version\n\n # available measurement intervals\n if self._data['report_primary_shared_dimension_id'] is None:\n self._data['report_primary_shared_dimension_id'] = 0\n\n self._db.Query(\"\"\"\n SELECT measurement_interval.*,\n dashboard_element.element_id\n FROM dashboard_element\n LEFT JOIN measurement_interval\n ON measurement_interval.measurement_interval_id = dashboard_element.measurement_interval_id\n WHERE\n (dashboard_element.`element_id`<>%s\n AND dashboard_element.measurement_interval_id <> %s\n AND dashboard_element.shared_measure_id = %s\n AND dashboard_element.`type` = 'internal report'\n AND ifnull(dashboard_element.report_used_for_drill_to_ind,'N') = %s\n AND ifnull(dashboard_element.report_primary_shared_dimension_id,0) = %s\n AND ifnull(dashboard_element.segment_id,0) = %s)\n OR\n dashboard_element.`element_id`=%s\n AND 3=4\n \n GROUP BY measurement_interval.measurement_interval_id\n ORDER BY\n measurement_interval.display_sequence,\n dashboard_element.name ASC\n \"\"\",\n (self._id,\n self._data['measurement_interval_id'],\n self._data['shared_measure_id'],\n self._data['report_used_for_drill_to_ind'],\n self._data['report_primary_shared_dimension_id'],\n self._data['segment_id'],\n self._id))\n\n\n for interval in self._db.record:\n interval['report_data_set_instance_id'] = 0\n available_intervals.append(interval)\n\n # see related\n self._db.Query(\"\"\"SELECT e.*\n FROM dashboard_element_topic det, dashboard_element e\n WHERE e.element_id = det.dashboard_element_id\n AND dashboard_element_id <> %s\n AND e.enabled_ind = 'Y'\n AND topic_id IN (select topic_id from dashboard_element_topic where dashboard_element_id = %s)\n UNION SELECT e.*\n FROM dashboard_element e, metric_drill_to_report m\n WHERE m.metric_element_id = e.element_id\n AND m.report_element_id = %s\n AND e.enabled_ind = 'Y'\n AND ifnull(e.segment_id,0) = %s\n \"\"\", (self._id, self._id, self._id, self._data['segment_id']))\n \n\n for related_element in self._db.record:\n if not related_element['segment_id']:\n related_element['segment_id'] = 0\n if related_element['segment_id'] == self._data['segment_id']:\n related_element['segment_value_id'] = self._segment_value_id\n else:\n related_element['segment_value_id'] = 0\n related.append(related_element)\n\n # elements displayed on the page\n before_dataset = list()\n after_dataset = list()\n \n charts_before_dataset = list()\n charts_after_dataset = 
list()\n \n \n # dataset table\n dataset_el = OrderedDict()\n dataset_el['element_id'] = ''\n dataset_el['element_type'] = 'dataset'\n dataset_el['element_name'] = ''\n dataset_el['element_desc'] = ''\n dataset_el['placement'] = ''\n dataset_el['sequence'] = 0\n dataset_el['show_ind'] = self._data['show_data_set_table_in_report_ind']\n \n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND \n (ISNULL(report_data_set_pivot_id)\n OR report_data_set_pivot_id = 0) \n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n charts_before_dataset.append(chart_el)\n else:\n charts_after_dataset.append(chart_el)\n \n # pivots\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_pivot\n WHERE\n `element_id`= %s\n ORDER BY display_sequence ASC\"\"\", (self._id, ))\n for pivot in self._db.record:\n before_pivot = list()\n after_pivot = list()\n #pivot_element = list()\n \n pivot_el = OrderedDict()\n pivot_el['element_id'] = pivot['report_data_set_pivot_id']\n pivot_el['element_type'] = 'pivot'\n pivot_el['element_name'] = pivot['name']\n pivot_el['element_desc'] = ''\n pivot_el['placement'] = pivot['pivot_table_report_placement']\n pivot_el['sequence'] = pivot['display_sequence']\n pivot_el['show_ind'] = pivot['enabled_ind']\n \n # charts\n self._db.Query(\"\"\"SELECT *\n FROM report_data_set_chart \n WHERE \n `element_id`= %s\n AND report_data_set_pivot_id = %s \n ORDER BY display_sequence ASC\"\"\",\n (self._id, pivot_el['element_id']))\n for chart in self._db.record:\n chart_el = OrderedDict()\n chart_el['element_id'] = chart['report_data_set_chart_id']\n chart_el['element_type'] = 'chart'\n chart_el['pivot_id'] = 0\n if chart['report_data_set_pivot_id']:\n chart_el['pivot_id'] = chart['report_data_set_pivot_id']\n chart_el['element_name'] = chart['name']\n chart_el['element_desc'] = chart['description']\n chart_el['placement'] = chart['chart_placement']\n chart_el['sequence'] = chart['display_sequence']\n chart_el['show_ind'] = chart['enabled_ind']\n if chart_el['placement'] == 'before table': \n before_pivot.append(chart_el)\n else:\n after_pivot.append(chart_el)\n pivot_element = before_pivot + [pivot_el] + after_pivot \n \n if pivot_el['placement'] == 'before data set':\n before_dataset += pivot_element\n else:\n after_dataset += pivot_element\n elements = charts_before_dataset + before_dataset + [dataset_el] + after_dataset + charts_after_dataset\n \n \n self._jfile.make_current_meta(last_data_set_instance,\n available_meas_times,\n available_intervals,\n drill_by,\n related,\n elements,\n self._segment_values)",
"def split(df, group):\n\n data = namedtuple(\"data\", [\"filename\", \"object\"]) #initiate \"data\" tyoe\n gb = df.groupby(group) #group df by group attribute\n return [\n data(filename, gb.get_group(x))\n for filename, x in zip(gb.groups.keys(), gb.groups)\n ]",
"def group_data():\n\n # Merge on Departure.\n\n # Merge on Arrival.\n\n data = pd.read_csv(path + \"/data/public/public_train.csv\")[[\"DateOfDeparture\", \"Arrival\"]]\n data['DateOfDeparture'] = pd.to_datetime(data['DateOfDeparture'])\n\n arrival = join_cleaned_data().\\\n rename(columns={'Date': 'DateOfDeparture', 'Airport': 'Arrival'}).\\\n set_index(\"DateOfDeparture\")\n\n merged_arrv = pd.merge(data, arrival, on=[\"DateOfDeparture\", \"Arrival\"], how=\"left\")\n\n # Rename and drop columns.\n\n merged_arrv.columns = [c + \"_Arrival\" if c not in [\"DateOfDeparture\",\n \"DateOfArrival\",\n \"Arrival\",\n \"WeeksToDeparture\"]\n else c\n for c in merged_arrv.columns]\n print merged_arrv\n merged_arrv = merged_arrv.drop([\"Arrival\"], axis=1)\n\n # Concatenate the two fields.\n # merged_all = pd.concat([merged_arrv, merged_dept], axis=1)\n\n merged_all = merged_arrv.\\\n convert_objects(convert_numeric=True)\n merged_all.to_csv(path + \"/Submission/temperatures.csv\")",
"def collect_data():\n\n \"Aqui va el codigo de alberto para recoger los datos que puede venir en forma de diccionario\"\n #TODO: Función para recoger los datos de los bms y meterlos en diccionarios (Alberto jr.)\n\n bms1 = dict()\n bms2 = dict()\n bms3 = dict()\n general = dict()\n\n\n # Ejemplos de datos para meter en los diccionarios\n\n temperature = 35.5\n voltage1 = 15.2\n voltage2 = 14.8\n date = time.strftime(\"%Y-%m-%d\") # Current date\n t = time.strftime(\"%H:%M:%S\") # Current time\n\n return bms1, bms2, bms3, general",
"def process_data(data, engine):\n\n def convert(x):\n unit = x['standard_units']\n value = x['standard_value']\n if unit == \"NM\":\n return value * 1e-9\n elif unit == \"-LOG(10) M\":\n return 10 ** (-value)\n else:\n raise RuntimeError\n\n # Filter Na\n data.dropna(how=\"any\", subset=USED_COLS, inplace=True)\n # Only keep measurements that are KD related\n data = data[data.standard_type.isin(KD_TYPES)]\n # Only keep measurements with some defined units\n data = data[data.standard_units.isin(KD_UNITS)]\n # Convert to M valued units\n data['standard_value'] = data.apply(convert, axis=1)\n # Keep only equal relation measurements\n data = data[data.standard_relation == '=']\n # Remove multiple targets measurements\n data = data[~data.target_id.str.contains(',')]\n # Remove (target,compound) pairs with more than one measurement\n key = ['standard_inchi_key', 'target_id']\n grouped = data.groupby(key).size()\n join_condition = grouped[grouped == 1].reset_index()[key]\n data = data.merge(join_condition, on=key, how='inner')\n # Remove outliers measurements\n data = data[(data.standard_value <= 1.7e-3) & (data.standard_value >= 1.e-10)]\n # Convert to PK values\n data['standard_value'] = - np.log10(data['standard_value'])\n # Remove samples for which the protein_id has no sequence\n sequence_loader = SequenceLoader(engine=engine)\n data = sequence_loader.transform(data).dropna(how=\"any\")\n # Remove samples for which the compound_id has no inchi\n inchi_loader = InchiLoader(engine=engine)\n data = inchi_loader.transform(data).dropna(how='any')\n # We will only use the following columns\n return data[[\"standard_inchi_key\", \"target_id\", \"standard_inchi\", \"sequence\", \"standard_value\"]]",
"def get_dataframe(self):\n for i, study_id in enumerate(self.studies_to_combine):\n copy = repr(self.original_study_location).strip(\"'\")\n study_location = copy.replace(\"MTBLS1\", study_id)\n\n for maf in self.sort_mafs(study_location, study_id):\n maf_temp = None\n try:\n maf_temp = pandas.read_csv(os.path.join(study_location, maf), sep=\"\\t\", header=0, encoding='unicode_escape')\n except pandas.errors.EmptyDataError as e:\n logger.error(f'EmptyDataError Issue with opening maf file {maf}: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n except Exception as e:\n logger.error(f'Issue with opening maf file {maf}, cause of error unclear: {str(e)}')\n self.unopenable_maf_register.append(maf)\n continue\n\n cleanup_function = getattr(DataFrameUtils, f'{self.method}_maf_cleanup')\n maf_temp = cleanup_function(maf_temp, study_id, maf)\n maf_as_dict = totuples(df=maf_temp, text='dict')['dict']\n\n yield maf_as_dict",
"def join_survey_data(survey, deezer):\n\n\n df = survey.rename(columns={'Age': 'user_age', 'Gender': 'user_gender',\n 'deezer_id': 'media_id'})\n\n for index, row in df.iterrows():\n if pd.isnull(row['time']):\n continue\n time = row['time'].split(',')\n if row['user_gender'] == 'Male':\n user_gender = 1\n else:\n user_gender = 0\n if time == None:\n if row['rating'] == 0:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0,\n 999, 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date',\n 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id',\n 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n elif 'Anytime' in time:\n for i in [1480513129, 1479067262, 1478675619]:\n new = pd.DataFrame(np.array([[999999, i, row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], 0]]),\n columns=['genre_id', 'ts_listen',\n 'media_id', 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n else:\n t_dict = {'Morning': 0, 'Afternoon': 0, 'Evening': 0}\n for t in time:\n t_dict[t] = 1\n for i in [('Morning', 1480513129), ('Afternoon', 1479067262),\n ('Evening', 1478675619)]:\n new = pd.DataFrame(np.array([[999999, i[1], row['media_id'],\n 999999, 0, 20001010, 1, 0, 999,\n 1, user_gender,\n row['user_id'], None,\n row['user_age'], t_dict[i[0]]]]),\n columns=['genre_id',\n 'ts_listen',\n 'media_id',\n 'album_id',\n 'context_type',\n 'release_date', 'platform_name',\n 'platform_family',\n 'media_duration',\n 'listen_type', 'user_gender',\n 'user_id', 'artist_id', 'user_age',\n 'is_listened'])\n deezer = deezer.append(new)\n\n return deezer",
"def splitByBand(reader):\r\n try:\r\n total_prod_cost = 0\r\n total_album_sales = 0\r\n len = 0\r\n\r\n for row in reader:\r\n theDict = d[row[bandCol]] # step1: gets the dictionary for the band\r\n\r\n al_sale = theDict[AlbumSales] # step2 gets the list of album sales for the band\r\n al_sale.append(float(row[album_sales_col])) #step3 appends current row value to the list\r\n theDict[AlbumSales] = al_sale #step4 updates the value to 'AlbumSales' key\r\n\r\n pc = theDict[ProdCost] #step5 gets the list of production cost for the band\r\n pc.append(float(row[prod_cost_col])) #step6 appends the current row value to the list\r\n theDict[ProdCost] = pc #step7 updates the value to 'ProdCost'\r\n d[row[bandCol]] = theDict #step8 updates value for d with updated theDict\r\n except Exception as e:\r\n print('Exception in splitByBand')\r\n raise e",
"def extract_data():\n raw_data = pd.read_csv(\"../../../resource/DataVisualization/vaccinations.csv\")\n raw_data = raw_data[[\"location\", \"date\", \"people_fully_vaccinated_per_hundred\"]]\n raw_data.date = pd.to_datetime(raw_data.date, format=\"%Y-%m-%d\")\n min_date = raw_data.date.min()\n raw_data.date = raw_data.date-min_date\n raw_data.date = pd.Series([x.days for x in raw_data.date])\n raw_data.drop(raw_data.loc[raw_data.people_fully_vaccinated_per_hundred.isnull()].index,\n axis=0, inplace=True)\n raw_data[\"people_fully_vaccinated_per_hundred\"] /= 100\n\n data_dict = dict()\n for country in raw_data.location.unique():\n if len(raw_data.loc[raw_data.location == country]) >= 100:\n tmp_data = raw_data.loc[raw_data.location == country]\n tmp_data.drop(\"location\", axis=1, inplace=True)\n data_dict[country] = {\"data\":tmp_data}\n else:\n raw_data.drop(raw_data.loc[raw_data.location ==\n country].index, inplace=True)\n return data_dict, min_date, raw_data",
"def train(self, metergroup):\n # Inizialise stats and training data:\n self.stats = []\n self.onpower_train = pd.DataFrame(columns=['onpower'])\n self.offpower_train = pd.DataFrame(columns=['offpower'])\n self.duration_train = pd.DataFrame(columns=['duration'])\n\n # Calling train_on_chunk by meter:\n instance = 1 # initial instance.\n for meter in metergroup.meters:\n for chunk in meter.power_series():\n if chunk.empty:\n print(\"Chunk empty\")\n else:\n print(\"Training on chunk\")\n if self.sampling_method is not None:\n how = lambda df: getattr(df, self.sampling_method)()\n else:\n how = lambda df: df.mean()\n \n self.train_on_chunk(how(pd.DataFrame(chunk.resample(\n self.sample_period))),\n meter\n )\n\n instance += 1",
"def splitter (data1, data2):\n flow_data = list()\n fare_record_data = list()\n\n for line in data1:\n line = [line[2:6],line[6:10],line[10:15],line[15:18],line[18],line[19],line[36:39],line[20:28],line[28:36],line[42:49]]\n flow_data.append(line)\n\n flow = pd.DataFrame(flow_data, columns=[\"ORIGIN_CODE\",\"DESTINATION_CODE\",\"ROUTE_CODE\",\"STATUS_CODE\",\"USAGE_CODE\",\"DIRECTION\",\"TOC\",\"VALID_UNTIL\",\"VALID_FROM\",\"FLOW_ID\"])\n flow['ROUTE_CODE'] = flow['ROUTE_CODE'].astype(object)\n flow.index.name=\"flow_idx\"\n\n for line in data2:\n line=[line[2:9],line[9:12],line[12:20]]\n fare_record_data.append(line)\n\n fare_record = pd.DataFrame(fare_record_data, columns=[\"FLOW_ID\",\"TICKET_CODE\",\"FARE\"])\n fare_record.index.name = \"fare_record_idx\"\n\n return flow,fare_record",
"def _dataframe_preprocess(self):\n # 1. add baisc feature like date, time in day, ....\n if self.data_type != 'porto':\n self.df['TIMESTAMP'] = self.df.apply(lambda df: df['TIMESTAMPS'][0], axis=1)\n self.df['TIME'] = pd.to_datetime(self.df['TIMESTAMP'], unit='s', utc=True)\n \n self.df.TIME = self.df.TIME.dt.tz_convert(self.timezone)\n # 2. group df for specific driver analysis\n self.grouped_df = self.df.groupby('LABEL')\n if self.count_od_info:\n if 'SD' not in self.df.columns:\n self._add_OD_info()\n self.grouped_od = self.df.groupby('SD')",
"def data_parser(df):\n\n chunks = []\n for row in df.itertuples():\n piece = {}\n piece['datetime'] = row[1]\n piece[row[3]] = row[4]\n chunks.append(piece)\n\n # Join dicts on shared 'datetime' keys.\n combine = defaultdict(dict)\n for elem in chunks:\n combine[elem['datetime']].update(elem)\n\n ordered = sorted(combine.values(), key=itemgetter(\"datetime\"))\n\n mapped_generation = []\n for item in ordered:\n mapped_types = [(mapping.get(k, k), v) for k, v in item.items()]\n\n # Need to avoid multiple 'unknown' keys overwriting.\n complete_production = defaultdict(lambda: 0.0)\n for key, val in mapped_types:\n try:\n complete_production[key] += val\n except TypeError:\n # Datetime is a string at this point!\n complete_production[key] = val\n\n dt = complete_production.pop('datetime')\n final = (dt, dict(complete_production))\n mapped_generation.append(final)\n\n return mapped_generation",
"def _get_data(self) -> dict:\n LOGGER.debug(f\"Setting data property for {self.dirname}\")\n data = {}\n for axis in range(1, 4):\n # Subsample by 8 since this does not vary quickly\n data[f\"aoatter{axis}\"] = (\n self.tlm[f\"aoatter{axis}\"].vals[::ATT_ERR_SUBSAMP].astype(np.float32)\n )\n data[\"aokalstr\"] = self.tlm[\"aokalstr\"].vals\n # fmt: off\n data[\"npnt_kalm\"] = (\n (self.tlm[\"aopcadmd\"].vals == \"NPNT\")\n & (self.tlm[\"aoacaseq\"].vals == \"KALM\")\n )\n # fmt: on\n for slot in range(8):\n data[f\"aca_track{slot}\"] = self.tlm[f\"aoacfct{slot}\"].vals == \"TRAK\"\n data[f\"aca_ir{slot}\"] = self.tlm[f\"aoaciir{slot}\"].vals == \"ERR\"\n data[\"times\"] = self.tlm[\"aokalstr\"].times\n data[\"perigee_times\"] = self.tlm.perigee_times.astype(np.float32)\n data[\"perigee\"] = self.perigee.date\n data[\"rad_entry\"] = self.rad_entry.date\n data[\"rad_exit\"] = self.rad_exit.date\n data[\"obss\"] = self.obss.as_array()\n\n return data",
"def load_meters_from_buliding(self, target_building, meters_name=[], sample_rate = '1min'):\n if self.df is None:\n self.read_data_from_csv()\n \n if len(meters_name) < 1 :\n meters_name = self.meter_name.keys()\n\n if 'main' in meters_name:\n meters_name.remove('main')\n \n building_meters = self.df.groupby('buildingid').get_group(target_building)\n building_meters.index = pd.to_datetime(building_meters['reporttime'], format='%Y-%m-%d %H:%M:%S')\n building_meters = building_meters.groupby('channelid')\n building_channels = building_meters.groups.keys()\n \n if self.meter_name['main'][0] not in building_channels: return\n buliding_df = building_meters.get_group(self.meter_name['main'][0]).rename(columns={\"w\": \"main\"})\n buliding_df = buliding_df.resample(sample_rate, how='mean')\n target_meters = ['main']\n\n for meter, channel_ids in self.meter_name.iteritems():\n if meter in meters_name and channel_ids[0] in building_channels:\n appliance_meter = building_meters.get_group(channel_ids[0]).rename(columns={\"w\": meter})\n \n for channel_id in channel_ids[1:]:\n if channel_id not in building_channels: continue\n another_channel = building_meters.get_group(channel_id).rename(columns={\"w\": meter})\n appliance_meter.append(another_channel)\n\n appliance_meter = appliance_meter.resample(sample_rate, how='mean')\n buliding_df = pd.merge(buliding_df, appliance_meter, right_index=True, left_index=True, how='left')\n target_meters.append(meter)\n \n buliding_df = buliding_df[target_meters]\n buliding_df = buliding_df[~buliding_df.index.duplicated()]\n if buliding_df is not None:\n self.buliding_df.setdefault(target_building, buliding_df)\n \n return buliding_df",
"def stats_data_by_type():\n photos = db_session.query(MediaFiles.size).filter(MediaFiles.duration == 0)\n videos = db_session.query(MediaFiles.size).filter(MediaFiles.duration > 0)\n result = [{'name': 'Photos', 'color': '#76BCEB',\n 'data': [photos.with_entities(func.sum(MediaFiles.size)).all()[0][0],\n photos.count()]},\n {'name': 'Videos', 'color': '#397DAA',\n 'data': [videos.with_entities(func.sum(MediaFiles.size)).all()[0][0],\n videos.count()]}]\n return result",
"def get_datalist_fr_json(self):\n raw_data = json.load(open(self.saved_json_file, 'r'))\n for indivdual_set in raw_data['query']['results']['stats']:\n temp_dict_data = {}\n if type(indivdual_set) == str:\n #for single data\n continue # temp do not use\n for parameters in indivdual_set.keys():\n if type(indivdual_set[parameters]) == str:\n temp_dict_data[parameters] = indivdual_set[parameters]#for symbol\n elif type(indivdual_set[parameters]) == dict:\n if indivdual_set[parameters].has_key('content'):\n temp_dict_data[parameters] = indivdual_set[parameters]['content']\n\n ## append to list\n self.com_data_allstock_list.append(temp_dict_data)",
"def structure_by_package(mel):\n \"\"\"receives in a pandas dataframe\"\"\"\n string='K10024-'\n WP='00'\n l={}\n mel['Level 1','Level 2','Level 3','Level 4']=''\n mel['WP']=mel['Level'].str.replace('.','',regex=True) \n for i,row in mel.iterrows():\n print (WP)\n if (type(row['WP Activity/ Part No.']) is str) and (string in row['WP Activity/ Part No.']) :\n #new section starts:\n WP=row['WP Activity/ Part No.']\n l[row['Level']]=row['Equipment Description']\n \n mel.loc[i,'WP']=WP\n for key in l.keys():\n mel.loc[i,'Level ' +key]=l[key]\n \n mel.dropna(subset=['Delivery','WP'], inplace=True)\n \n mel['WP']=mel['WP'].str.replace('K10024-','',regex=False) \n mel['WP']=mel['WP'].str[:2]\n mel.drop(columns=['Level'],inplace=True) \n mel.to_excel('packages_MEL02.xlsx')\n return mel",
"def _build_results(self):\n results = {}\n cols = []\n for pol in POLLUTANTS:\n for adj in ADJUSTMENTS:\n cols.append(get_rate_column(pol, adjustment=adj, generated=False))\n cols.append(get_column(pol, adjustment=adj))\n cols.append(\"net_consumed_mwh\")\n for ba in self.regions:\n results[ba] = pd.DataFrame(\n index=self.generation.index, columns=cols, dtype=np.float64\n )\n return results",
"def group_data_by_gs(data_table):\n gene_data = collections.defaultdict(lambda: collections.defaultdict(list))\n for _idx, row in data_table.iterrows():\n samp = row['sample']\n gene = row['gene']\n gene_data[gene][samp].append({\n 'muttype': row['type'].strip(),\n 'normalized': row['Normalized'], # NMAF in the manuscript\n 'consequence': row['MissenseConsequence'].strip(),\n })\n return gene_data",
"def _composition_handler(self):\n return {\n group : StockAnalyzer(data) \\\n for group, data in self.data.groupby(self.group_by)\n }",
"def get_all_data(ds_names, ds_types, indxs, fields, **kwargs):\n data = {f:{} for f in fields+['time']}\n\n for ds_type, keys in ds_types.items():\n for dsk in keys:\n print('Getting data for: ',dsk)\n\n dsf = ds_names[dsk]\n\n if ds_type == 'maven':\n ds = pd.read_csv(dsf)\n for field in fields:\n\n ds_dat = get_ds_data(ds, field, indxs[ds_type],\n maven=True, grid=False)\n data[field][dsk] = ds_dat\n time = get_ds_data(ds, 'time', indxs[ds_type],\n maven=True, grid=False)\n time = time-time[0]\n time = time/time[-1]\n data['time'][dsk] = time\n \n\n\n else:\n for field in fields:\n with h5py.File(dsf, 'r') as ds:\n \n if '_x' in field or '_y' in field or '_z' in field:\n get_data_func = get_rotated_data\n else: get_data_func = get_ds_data\n try:\n ds_dat = get_data_func(ds, field, indxs[ds_type],\n grid='batsrus' not in ds_type, **kwargs)\n #grid=ds_type=='heliosares', **kwargs)\n except ValueError:\n ds_dat = np.array([])\n data[field][dsk] = ds_dat\n\n data['time'][dsk] = np.linspace(0, 1, np.max(indxs[ds_type].shape))\n\n return data",
"def combine_record(self, dt, container = ''):\n \n record_dataset_legth ={} \n other_ds = []\n\n ''' I fill the dic e.g. record_dataset_legth{100:['era5_1','ncar'], 80:['bufr','igra2'] }\n i.e. the keys are the lengths, the entries are the lists of datasets '''\n\n duplicates = []\n\n for k in container.keys(): # loop over the dataset\n if k not in other_ds:\n other_ds.append(k)\n for f in container[k]: # loop over the file per dataset\n num_rec = len(container[k][f]['obs_tab'][\"date_time\"])\n \n \"\"\" Storing all the reports id with the proper prefix (for each different dataset) \"\"\"\n rep_id = b''.join(container[k][f][\"obs_tab\"]['report_id'][0]) \n rep_id = self.observation_ids_merged[k] + rep_id \n duplicates.append( rep_id ) \n \n if num_rec not in record_dataset_legth.keys():\n record_dataset_legth[num_rec] = {}\n record_dataset_legth[num_rec]['best_ds'] = []\n record_dataset_legth[num_rec]['file'] = []\n\n record_dataset_legth[num_rec]['best_ds'].append(k)\n record_dataset_legth[num_rec]['file'].append(f)\n\n max_entries = max(record_dataset_legth.keys())\n \n ''' best_ds is the list of longest datasets, best_datasets the list of all the datasets available including best_ds '''\n best_datasets = record_dataset_legth[max_entries]\n\n \"\"\" Choosing the priority of the datasets:\n - if era5_1 or era5_2 are present, pick them (they cant be both present for the same date_time)\n - else, if igra2 is present, pick it\n - else, one of the remaining ones \"\"\"\n\n if 'era5_2' in best_datasets and 'era5_1' not in best_datasets: # era5_1 and era5_2 should never be both present anyway...\n best_ds = 'era5_2' \n elif 'era5_1' in best_datasets and 'era5_2' not in best_datasets:\n best_ds = 'era5_1'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' in best_datasets:\n best_ds = 'igra2'\n elif 'era5_1' not in best_datasets and 'era5_2' not in best_datasets and 'igra2' not in best_datasets:\n best_ds = record_dataset_legth[max_entries]['best_ds'][0] # pick the first of the list \n\n best_file = record_dataset_legth[max_entries]['file'][0]\n\n ''' If more file are available for the same best_ds, pick the first one from the list '''\n selected_obstab, selected_era5fb = container[best_ds][best_file]['obs_tab'] , container[best_ds][best_file]['era5fb_tab']\n\n ''' Creating the correct observations and record ids. \n All the bytes variable are shrunk to a long |S1 byte variable type, otherwise \n writing in h5py will not work. 
'''\n \n for var in ['observation_id']:\n if type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.bytes_:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var] ] )\n elif type (selected_obstab[var] ) == np.ndarray and type (selected_obstab[var][0] ) == np.ndarray:\n selected_obstab[var] = np.array ([self.observation_ids_merged[best_ds] + b''.join(l) for l in selected_obstab[var][:] ] )\n\n for var in ['report_id']:\n val = selected_obstab[var][0]\n if type (selected_obstab[var] ) == np.ndarray and type (val) == np.bytes_:\n value = self.observation_ids_merged[best_ds] + b''.join(val) # it is the same for each row in the table\n elif type (selected_obstab[var] ) == np.ndarray and type (val) == np.ndarray:\n value = self.observation_ids_merged[best_ds] + b''.join(val) \n arr = np.full( (1, len( selected_obstab['date_time']) ) , value )[0] # np.full returns a list of lists\n\n selected_obstab[var] = arr\n\n\n for var in selected_era5fb.keys():\n if type (selected_era5fb[var]) == np.ndarray and type (selected_era5fb[var][0] ) == np.ndarray:\n try:\n selected_era5fb[var] = np.array( [b''.join(l) for l in selected_era5fb[var][:] ] )\n #print('MANAGED FFF', var)\n except:\n value = [b''.join(l) for l in selected_era5fb[var][0] ][0]\n #print('VALUE IS FFF', value)\n selected_era5fb[var] = np.array( (1, len( selected_obstab[var]) ) ).fill(value)\n\n \"\"\" Extracting the header \"\"\"\n selected_head = self.get_header_table(dt, ds = best_ds, File = best_file )\n for var in selected_head.keys():\n if type (selected_head[var] ) == np.ndarray and type (selected_head[var][0] ) == np.bytes_:\n selected_head[var] = np.array( [b''.join(l) for l in selected_head[var][:] ] )\n\n if 'best_ds' == 'era5_1' or best_ds == 'era5_2' :\n selected_obstab['advanced_assimilation_feedback'] = np.array([1]*len(selected_obstab['date_time']) )\n else:\n selected_obstab['advanced_assimilation_feedback'] = np.array([0]*len(selected_obstab['date_time']) )\n\n #best_ds_byte = np.bytes_(best_ds, ndtype = '|S10') # converting to bytes object\n best_ds_byte = np.bytes_(best_ds) # converting to bytes object \n arr = np.full( (1, len( selected_obstab['date_time']) ) , best_ds_byte )[0]\n selected_obstab['source_id'] = arr\n\n duplicate = b','.join(duplicates)\n #selected_head['duplicates'] = np.array(duplicate)\n\n duplicate = np.array(duplicate).astype(dtype='|S70')\n selected_head['duplicates'] = np.array([duplicate])\n selected_head['report_id'] = np.array([selected_obstab['report_id'][0]])\n selected_head['source_id'] = np.array([selected_obstab['source_id'][0]])\n selected_head['record_timestamp'] = np.array([selected_obstab['date_time'][0]])\n\n selected_file = np.bytes_(best_file.split('/')[-1])\n \n return best_ds, selected_obstab, selected_era5fb, selected_head, selected_file, best_file",
"def carrier_specific_to_dataset(model_run):\n carrier_tiers = model_run.sets[\"carrier_tiers\"]\n loc_tech_dict = {k: [] for k in model_run.sets[\"loc_techs_conversion_plus\"]}\n data_dict = dict()\n # Set information per carrier tier ('out', 'out_2', 'in', etc.)\n # for conversion-plus technologies\n if model_run.sets[\"loc_techs_conversion_plus\"]:\n # carrier ratios are the floating point numbers used to compare one\n # carrier_in/_out value with another carrier_in/_out value\n data_dict[\"carrier_ratios\"] = dict(\n dims=[\"carrier_tiers\", \"loc_tech_carriers_conversion_plus\"], data=[]\n )\n for carrier_tier in carrier_tiers:\n data = []\n for loc_tech_carrier in model_run.sets[\"loc_tech_carriers_conversion_plus\"]:\n loc, tech, carrier = loc_tech_carrier.split(\"::\")\n carrier_ratio = (\n model_run.locations[loc]\n .techs[tech]\n .constraints.get_key(\n \"carrier_ratios.carrier_\" + carrier_tier + \".\" + carrier, 1\n )\n )\n data.append(carrier_ratio)\n loc_tech_dict[loc + \"::\" + tech].append(carrier_ratio)\n data_dict[\"carrier_ratios\"][\"data\"].append(data)\n\n # Additional system-wide constraints from model_run.model\n if model_run.model.get(\"reserve_margin\", {}) != {}:\n data_dict[\"reserve_margin\"] = {\n \"data\": [\n model_run.model.reserve_margin.get(c, np.nan)\n for c in model_run.sets[\"carriers\"]\n ],\n \"dims\": \"carriers\",\n }\n\n return data_dict",
"def __object_demapper(self, data: list) -> pd.DataFrame:\n data = pd.DataFrame.from_records([s.to_dict() for s in data])\n\n return data",
"def combine_data(spectras, compounds) :\n final = {}\n for hmdb_id, spec_objs in spectras.items() :\n c = compounds.pop(hmdb_id, None)\n if not c :\n continue\n c.spectras = spec_objs\n final[hmdb_id] = c\n return final"
] | [
"0.54995227",
"0.5438723",
"0.5368502",
"0.5285218",
"0.5209991",
"0.5202449",
"0.51980925",
"0.5175748",
"0.5165017",
"0.5150522",
"0.51441115",
"0.5076895",
"0.50656265",
"0.50650203",
"0.5061612",
"0.5057883",
"0.50543916",
"0.5050856",
"0.49904716",
"0.4989884",
"0.49895522",
"0.4975154",
"0.49727502",
"0.49721086",
"0.49554804",
"0.4952854",
"0.494106",
"0.49159202",
"0.49117824",
"0.4905359"
] | 0.806629 | 0 |
Reset the state of the evironment for a new episode `setup` is used to let the reset function know when we're calling it from `setup`. If we don't, the 'random' init scheme should reset to the randomly choosen position instead of picking a new random one. | def reset(self, setup=False):
self._done = False
self._nbSteps = 0
x = None
if (self.startPosX == 'random' and setup) or (
self.startPosX == 'episodeRandom'):
x = random.randint(0, self._width - 1)
elif (self.startPosX == 'random' and not setup):
x = self._initState[0]
elif self.startPosX == 'center':
x = self._width - 1
else:
x = int(self.startPosX)
y = None
if (self.startPosX == 'random' and setup) or (
self.startPosX == 'episodeRandom'):
y = random.randint(0, self._height - 1)
elif (self.startPosY == 'random' and not setup):
y = self._initState[1]
elif self.startPosX == 'center':
y = self._height - 1
else:
y = int(self.startPosX)
self._currentPos = (x, y)
self._trajectory = [(x, y)]
return (x, y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = np.random.uniform(-10.0,10.0,size=(2))\n\n # initialize sheep positions\n if self.fixed_reset:\n init_sheep_pose = np.array([75.0, 75.0])\n self.sheep_poses = (np.random.uniform(-50.0, 50.0, \n size=(self.num_sheep,2))) + init_sheep_pose[None,:]\n else:\n init_sheep_pose = np.random.uniform(-self.init_sheep_root, \n self.init_sheep_root, size=(2))\n self.sheep_poses = (np.random.uniform(-self.init_sheep_range, \n self.init_sheep_range, size=(self.num_sheep,2))) \\\n + init_sheep_pose[None,:]\n self.sheep_com = self.sheep_poses.mean(axis=0)\n\n # get the farthest sheep and radius of the sheep\n dist_to_com = np.linalg.norm((self.sheep_poses - self.sheep_com[None,:]), axis=1)\n self.farthest_sheep = self.sheep_poses[np.argmax(dist_to_com),:]\n self.radius_sheep = np.array([np.max(dist_to_com)])\n\n # update distance to target\n self.target_distance = np.linalg.norm(self.target - self.sheep_com)\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n if self.fixed_reset:\n init_dog_pose = np.array([0.0,75.0])\n else:\n init_theta = np.random.uniform(-np.pi,np.pi)\n init_dog_pose = init_sheep_pose + self.init_dog_distance*np.array([np.cos(init_theta), \n np.sin(init_theta)])\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state",
"def _hard_reset(self):\n self._reset_specific_envs(np.ones_like(self.episodes_done))\n self._update_other_info()",
"def _reset(self):\n np.random.shuffle(self.id)\n self.episode_step = 0 # Reset episode step counter at the end of every episode\n self._state = self.X_train[self.id[self.episode_step]]\n self._episode_ended = False\n\n return ts.restart(self._state)",
"def reset(self):\n self.tracker.reset()\n self.episode += 1\n self.episode_step = 0",
"def reset(self, **kwargs):\n if self._backend_agent:\n self._backend_agent._on_gym_reset_begin(self, **kwargs)\n\n result = self.env.reset(**kwargs)\n if self.steps_done_in_episode > 0 and not self.is_episode_done:\n self.episodes_done += 1\n self.total.episodes_done_inc()\n self.is_episode_done = False\n self.steps_done_in_episode = 0\n\n if self._backend_agent:\n self._backend_agent._on_gym_reset_end(self, result, **kwargs)\n return result",
"def reset(self):\n \n if self._config.fix_seed:\n self._init_seed = (self._init_seed + 1) % 2**32 # set_seed requires int\n self.game.set_seed(self._init_seed)\n\n super(ShootEnv, self).reset()\n\n self._killcount = 0.0\n self._ammo = self.game.get_game_variable(GameVariable.AMMO2)\n self._health = self.game.get_game_variable(GameVariable.HEALTH)\n\n return self._get_observation()",
"def reset(self):\n\n self.curr_episode += 1\n self.curr_step = 0\n\n self.action_episode_memory.append([])\n self.rewards.append([])\n\n self.is_finalized = False\n init_state, init_reward = self._take_action(5 * np.random.randn(self.act_dimension))\n self.initial_conditions.append(init_state)\n return init_state",
"def specific_reset(self) -> None:\n self.agent.specific_reset() # reset joints\n new_pos = self.agent.init_xyz\n new_pos[:2] = np.random.uniform(-0.01, 0.01, 2)\n self.agent.set_position(new_pos)\n self.old_potential = self.calculate_task_potential()",
"def reset(self):\n while not self._check_episode_start_condition():\n self._simulate()\n self.state, _ = self._extract_state()\n return self.state",
"def reset(\n self,\n *,\n seed: int | None = None,\n options: dict[str, Any] | None = None,\n ) -> tuple[np.ndarray, AtariEnvStepMetadata]:\n super().reset(seed=seed, options=options)\n del options\n # Gymnasium's new seeding API seeds on reset.\n # This will cause the console to be recreated\n # and loose all previous state, e.g., statistics, etc.\n seeded_with = None\n if seed is not None:\n seeded_with = self.seed(seed)\n\n self.ale.reset_game()\n obs = self._get_obs()\n\n info = self._get_info()\n if seeded_with is not None:\n info[\"seeds\"] = seeded_with\n return obs, info",
"def _soft_reset(self):\n self._reset_specific_envs(self.episodes_done)\n self._update_other_info()",
"def _reset_seeds(self) -> None:\n self._seeds = [None for _ in range(self.num_envs)]",
"def reset_from_state(self, state):\n\n # initialize gym env variables\n self.finish = False\n self.curr_step = -1\n self.curr_episode += 1\n\n # initialize target position\n self.target = state[4:6]\n\n # initialize sheep com\n self.sheep_com = state[0:2]\n\n # get the farthest sheep and radius of the sheep\n self.farthest_sheep = state[2:4]\n self.radius_sheep = np.array([state[8]])\n\n # update distance to target\n self.target_distance = np.array([state[9]])\n\n # initialize sheep position\n self.sheep_poses = (np.random.uniform(-0.75*self.radius_sheep, \n 0.75*self.radius_sheep, size=(self.num_sheep,2))) \\\n + self.sheep_com[None,:]\n rnd_ind = np.random.choice(self.num_sheep)\n self.sheep_poses[rnd_ind,:] = state[2:4]\n\n # initialize values for reward estimation\n self.init_radius_sheep = self.radius_sheep\n self.init_target_distance = self.target_distance\n\n # initialize dog position\n init_dog_pose = state[6:8]\n self.dog_pose = init_dog_pose\n\n # initialize inertia\n self.inertia = np.ones((self.num_sheep, 2))\n\n # initialize episode reward and length\n self.episode_reward = 0\n self.episode_length = 0\n\n # get the state, reward, finish, info\n state = self._get_state()\n \n return state",
"def _reset(self):\r\n \r\n airgym.reset()\r\n self.stepN = 0\r\n self.episodeN += 1\r\n \r\n self.allLogs = { 'reward': [0] }\r\n self.allLogs['distance'] = [221]\r\n self.allLogs['action'] = [1]\r\n \r\n print(\"\")\r\n \r\n #self.sensors = airgym.getSensorStates()\r\n \r\n # Initial state\r\n self.state = airgym.getScreenDepthVis()\r\n \r\n \r\n return self.state",
"def reset(self):\r\n \r\n self.done = False\r\n self.t = 0\r\n self.episode = random.choice(episodes)\r\n\r\n # initiate agent\r\n self.agent = self.create_agent(Agent)\r\n \r\n # initiate state at time zero\r\n self.state = (self.episode[self.t]['ST Relative Indicator'], \r\n self.episode[self.t]['ST Relative Indicator'], \r\n self.agent.stock,\r\n self.t)\r\n \r\n return self.state",
"def _reset(self): # We are using a virtual function defined in the gym infrastructure.\n self.gazebo.unpauseSim()\n \"\"\"\n why we need to unpauseSim because resetting controllers and for checking the sensors, we need the simulation\n to be running because otherwise we don't have any sensory data and we don't have access to the controller reset\n functions services they won't work and tell you to hit play. => it is very important.\n \"\"\"\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.set_init_pose()\n #initialized robot\n self.gazebo.pauseSim()\n self.gazebo.resetSim()\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.gazebo.pauseSim()\n self.init_env_variables()\n obs = self._get_obs()\n simplified_obs = self.convert_obs_to_state(obs)\n\n return simplified_obs",
"def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")",
"def resetEyes(self):\n\n\t\tself.leds.on(\"FaceLeds\")",
"def reset(self):\n self.curr_episode += 1\n self.action_episode_memory.append([])\n self.is_game_done = False\n self.price = 1.00\n self.sendCmd(self.url,\"reset\")\n return self._get_state()",
"def env_init(self, env_info={}):\n self.dealer_sticks = env_info['dealer_sticks']\n self.random = np.random.RandomState(env_info['seed'])\n self.current_state = None",
"def _reset_(self):\n print(\"Resetting\")\n\n self._q_target_, x_target = self._pick_random_angles_()\n np.copyto(self._x_target_, x_target)\n if self._target_type == 'position':\n self._target_ = self._x_target_[self._end_effector_indices]\n elif self._target_type == 'angle':\n self._target_ = self._q_target_\n self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)\n self._cmd_prev_ = np.zeros(len(self._action_low)) # to be used with derivative control of velocity\n if self._reset_type != 'none':\n if self._reset_type == 'random':\n reset_angles, _ = self._pick_random_angles_()\n elif self._reset_type == 'zero':\n reset_angles = self._q_ref[self._joint_indices]\n self._reset_arm(reset_angles)\n\n rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(\n self._rand_obj_.get_state()\n )\n np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))\n\n print(\"Reset done\")",
"def reset(self):\n self.tracker.reset()\n self.total_max_q = 0.0\n self.episode_step = 0\n self.episode += 1",
"def reset(self, gui=False, test_ind=-1):\n # self.gui = gui\n # if gui:\n # # save episode to disk\n # if self._global_frames:\n # make_video_from_rgb_imgs(self._global_frames, self.output_path, f\"episode_global_{self.cur_episode}\")\n\n # for agent_id, frames in self._agent_frames.items():\n # if frames:\n # make_video_from_rgb_imgs(frames, self.output_path, f\"episode_{self.cur_episode}_{agent_id}\")\n\n # # clear frames of previous episode\n # self._global_frames = []\n # self._agent_frames = {agent_id: [] for agent_id in self.agent_tags}\n\n if (self.train_mode):\n seed = self.seed\n elif (test_ind < 0):\n seed = self.seed-1\n else:\n seed = self.test_seeds[test_ind]\n np.random.seed(seed)\n self.seed += 1\n\n self.cur_episode += 1\n self.t = 0 # step counter for each episode\n self.rewards = [0] # to keep track of global rewards\n obs = self.env.reset(done_only=False).cpu().numpy()\n\n # if self.gui:\n # self._global_frames.append(self.env.map_to_colors().astype(np.uint8))\n\n # for agent_id, agent_obs in obs.items():\n # self._agent_frames[agent_id].append(agent_obs.astype(np.uint8))\n\n # obs = list(obs.values())\n obs = self._get_state(obs) # new\n\n return obs",
"def reset(self):\n # Sample random state from initial state distribution\n self._cur_state = self._sample_state(self._mdp.I)\n self._prev_state = self._cur_state",
"def reset(self):\n self.observation = None\n self.episode_done = True",
"def _restart_environment_episode(self, force_environment_reset=False) -> None:\n raise NotImplementedError(\"\")",
"def reset(self):\n\n # Ending variables\n self.time_idle = 0\n self.time_episode = 0\n self.done_time_idle = False\n self.done_falling = False\n self.done_time_episode = False\n\n # hero variables\n self.last_location = None\n self.last_velocity = 0\n\n # Sensor stack\n self.prev_image_0 = None\n self.prev_image_1 = None\n self.prev_image_2 = None\n\n self.last_heading_deviation = 0",
"def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)",
"def seed_random():\n random.seed(0)",
"def reset(self):\r\n random.seed(1) # Set determanistic play\r\n if self.board is None:\r\n self.setup_board()\r\n self.board.reset()"
] | [
"0.66944516",
"0.6647286",
"0.65143913",
"0.6312656",
"0.6290814",
"0.62830704",
"0.6277763",
"0.62062037",
"0.61978877",
"0.6173854",
"0.61685586",
"0.61399466",
"0.613353",
"0.61265284",
"0.6108667",
"0.6104944",
"0.60824627",
"0.60824627",
"0.6075161",
"0.6065723",
"0.60425687",
"0.6032285",
"0.60291636",
"0.5983385",
"0.59776616",
"0.59693563",
"0.5967431",
"0.59631056",
"0.5941566",
"0.59100235"
] | 0.6798604 | 0 |
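A minimal sketch of the `setup` contract described in the query of the record above: with a 'random' start position, reset(setup=True) draws and stores the start cell once, and every later reset() reuses it. The class name GridEnvSketch, its constructor, and the width/height arguments are illustrative assumptions, not part of the original environment.

import random


class GridEnvSketch:
    """Illustrative only: 'random' start-position handling from the record above."""

    def __init__(self, width, height):
        self._width = width
        self._height = height
        # Construction plays the role of setup(): draw and remember the start cell.
        self._initState = self.reset(setup=True)

    def reset(self, setup=False):
        if setup:
            # Fresh random draw only when invoked from setup.
            x = random.randint(0, self._width - 1)
            y = random.randint(0, self._height - 1)
        else:
            # Later episodes restart from the stored draw.
            x, y = self._initState
        self._currentPos = (x, y)
        self._trajectory = [(x, y)]
        return (x, y)


env = GridEnvSketch(5, 5)
assert env.reset() == env._initState  # every new episode starts at the same random cell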
Make sure the netcdf cc data handler operates correctly | def test_data_handling_nc_cc():
input_files = [os.path.join(TEST_DATA_DIR, 'ua_test.nc'),
os.path.join(TEST_DATA_DIR, 'va_test.nc'),
os.path.join(TEST_DATA_DIR, 'orog_test.nc'),
os.path.join(TEST_DATA_DIR, 'zg_test.nc')]
with xr.open_mfdataset(input_files) as fh:
min_lat = np.min(fh.lat.values)
min_lon = np.min(fh.lon.values)
target = (min_lat, min_lon)
plevel = fh.plev[-1]
ua = np.transpose(fh['ua'][:, -1, ...].values, (1, 2, 0))
va = np.transpose(fh['va'][:, -1, ...].values, (1, 2, 0))
handler = DataHandlerNCforCC(input_files, features=['U_100m', 'V_100m'],
target=target, shape=(20, 20),
val_split=0.0,
worker_kwargs=dict(max_workers=1))
assert handler.data.shape == (20, 20, 20, 2)
handler = DataHandlerNCforCC(input_files,
features=[f'U_{int(plevel)}pa',
f'V_{int(plevel)}pa'],
target=target, shape=(20, 20),
val_split=0.0,
worker_kwargs=dict(max_workers=1))
if handler.invert_lat:
handler.data = handler.data[::-1]
assert handler.data.shape == (20, 20, 20, 2)
assert np.allclose(ua, handler.data[..., 0])
assert np.allclose(va, handler.data[..., 1]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_data(self):\n # ================ CHECK DATA / CONNECT / SELECT ================\n N = self.xyz.shape[0]\n # Chech array :\n if (self.connect.shape != (N, N)) or not isinstance(self.connect,\n np.ndarray):\n raise ValueError(\"c_connect must be an array of \"\n \"shape \" + str((N, N)))\n if self.select is None:\n self.select = np.ones_like(self.connect)\n if (self.select.shape != (N, N) or not isinstance(self.select,\n np.ndarray)):\n raise ValueError(\"c_select must be an array of \"\n \"shape \" + str((N, N)))\n # Mask c_connect :\n try:\n self.connect.mask\n except:\n self.connect = np.ma.masked_array(self.connect, mask=True)\n self.connect.mask[self.select.nonzero()] = False\n # Use specific color values :\n if (self.colval is not None) and isinstance(self.colval, dict):\n mask = np.ones_like(self.connect.mask)\n for k, v in zip(self.colval.keys(), self.colval.values()):\n mask[self.connect.data == k] = False\n self.colval[k] = color2vb(v)\n self.connect.mask = mask\n\n # ================ CHECK COLOR ================\n # Check colorby :\n if self.colorby not in ['count', 'strength', 'density']:\n raise ValueError(\"The c_colorby parameter must be 'count', \"\n \"'strength' or 'density'\")\n # Test dynamic :\n if (self.dynamic is not None) and not isinstance(self.dynamic, tuple):\n raise ValueError(\"c_dynamic bust be a tuple\")\n\n # ================ NON-ZERO INDICES ================\n # Find where there is non-masked connections :\n self._nnz_x, self._nnz_y = np.where(~self.connect.mask)\n self._indices = np.c_[self._nnz_x, self._nnz_y].flatten()\n self._Nindices = np.arange(len(self._indices))\n # Build position array :\n self.a_position = np.zeros((2*len(self._nnz_x), 3), dtype=np.float32)\n self.a_position[self._Nindices, :] = self.xyz[self._indices, :]",
"def test_solar_cc():\n\n features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']\n input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]\n nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')\n\n with xr.open_mfdataset(input_files) as fh:\n min_lat = np.min(fh.lat.values)\n min_lon = np.min(fh.lon.values) - 360\n target = (min_lat, min_lon)\n shape = (len(fh.lat.values), len(fh.lon.values))\n\n with pytest.raises(AssertionError):\n handler = DataHandlerNCforCC(input_files, features=features,\n target=target, shape=shape,\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n handler = DataHandlerNCforCC(input_files, features=features,\n nsrdb_source_fp=nsrdb_source_fp,\n target=target, shape=shape,\n temporal_slice=slice(0, 1),\n val_split=0.0,\n worker_kwargs=dict(max_workers=1))\n\n cs_ratio = handler.data[..., 0]\n ghi = handler.data[..., 1]\n cs_ghi = handler.data[..., 2]\n cs_ratio_truth = ghi / cs_ghi\n\n assert cs_ratio.max() < 1\n assert cs_ratio.min() > 0\n assert (ghi < cs_ghi).all()\n assert np.allclose(cs_ratio, cs_ratio_truth)\n\n with Resource(nsrdb_source_fp) as res:\n meta = res.meta\n tree = KDTree(meta[['latitude', 'longitude']])\n cs_ghi_true = res['clearsky_ghi']\n\n # check a few sites against NSRDB source file\n for i in range(4):\n for j in range(4):\n test_coord = handler.lat_lon[i, j]\n _, inn = tree.query(test_coord)\n\n assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j])",
"def test_ccds(self):\n #TODO write ccds tests",
"def __init__(self, datafile):\r\n super(type(self), self).__init__()\r\n self.data = None\r\n log.info(\"Trying to read netcdf \"+datafile)\r\n try:\r\n # We have to uncompress the file to a temporary file\r\n # This will be deleted on __del__ cleanup\r\n datafile = super(type(self), self).uncompressTempFile(datafile)\r\n self.data = Scientific.IO.NetCDF.NetCDFFile(datafile, \"r\")\r\n except BaseException as e:\r\n log.error(\"Couldn't read netcdf data from file \"+datafile)\r\n log.error(\"Exception {0}\".format(e))",
"def read_netcdf(self,filename):",
"def sanity_check_step(self):\n\n incs = [\"netcdf.h\"]\n libs = [\"libnetcdf.so\", \"libnetcdf.a\"]\n # since v4.2, the non-C libraries have been split off in seperate extensions_step\n # see netCDF-Fortran and netCDF-C++\n if LooseVersion(self.version) < LooseVersion(\"4.2\"):\n incs += [\"netcdf%s\" % x for x in [\"cpp.h\", \".hh\", \".inc\", \".mod\"]] + \\\n [\"ncvalues.h\", \"typesizes.mod\"]\n libs += [\"libnetcdf_c++.so\", \"libnetcdff.so\",\n \"libnetcdf_c++.a\", \"libnetcdff.a\"]\n\n custom_paths = {\n 'files': [\"bin/nc%s\" % x for x in [\"-config\", \"copy\", \"dump\",\n \"gen\", \"gen3\"]] +\n [\"lib/%s\" % x for x in libs] +\n [\"include/%s\" % x for x in incs],\n 'dirs': []\n }\n\n super(EB_netCDF, self).sanity_check_step(custom_paths=custom_paths)",
"def __init__(self):\n self.datasets = [\"ISCCP\",\"ISCCP_raw\",\"PATMOSX\",\"PATMOSX_raw\"]\n f = cdms.open(\"OBS/clt_ISCCP_corrected_198301-200912.nc\")\n fp = cdms.open(\"OBS/clt_PATMOSX_corrected_198301-200912.nc\")\n \n f_old = cdms.open(\"OBS/clt_ISCCP_198307-200806.nc\")\n fp_old = cdms.open(\"OBS/clt_PATMOSX_198200-200912.nc\")\n\n fgpcp = cdms.open(\"OBS/GPCP.precip.mon.mean.nc\")\n fcmap = cdms.open(\"OBS/CMAP.std.precip.mon.mean.nc\")\n \n \n self.ISCCP = f(\"clt\",time=('1984-1-1','2009-12-31'))\n self.ISCCP = MV.masked_where(np.isnan(self.ISCCP),self.ISCCP)\n cdutil.setTimeBoundsMonthly(self.ISCCP)\n\n self.PATMOSX = fp(\"clt\",time=('1984-1-1','2009-12-31'))\n self.PATMOSX = MV.masked_where(np.isnan(self.PATMOSX),self.PATMOSX)\n cdutil.setTimeBoundsMonthly(self.PATMOSX)\n\n self.ISCCP_raw = f_old(\"clt\",time=('1984-1-1','2008-6-31'))\n self.ISCCP_raw = MV.masked_where(np.isnan(self.ISCCP_raw),self.ISCCP_raw)\n cdutil.setTimeBoundsMonthly(self.ISCCP_raw)\n\n self.PATMOSX_raw = fp_old(\"clt\",time=('1982-1-1','2009-12-31'))\n self.PATMOSX_raw = MV.masked_where(np.isnan(self.PATMOSX_raw),self.PATMOSX_raw)\n cdutil.setTimeBoundsMonthly(self.PATMOSX_raw)\n\n self.GPCP = cdutil.averager(fgpcp(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n cdutil.setTimeBoundsMonthly(self.GPCP)\n self.CMAP = cdutil.averager(fcmap(\"precip\",time=('1979-1-1','2014-12-31'),latitude=(-90,90)),axis='x')\n self.CMAP.setAxis(0,self.GPCP.getTime())\n cdutil.setTimeBoundsMonthly(self.CMAP)",
"def have_cdc() -> bool:",
"def __init__(self, datafile):\r\n super(type(self), self).__init__()\r\n log.info(\"Trying to read netcdf \"+datafile)\r\n try:\r\n # We have to uncompress the file to a temporary file\r\n # This will be deleted on __del__ cleanup\r\n datafile = \\\r\n netcdf_reader.netcdfReader.uncompressTempFile(self, datafile) \r\n self.data = arcpy.NetCDFFileProperties(datafile)\r\n except BaseException as e:\r\n log.error(\"Couldn't read netcdf data from file \"+datafile)\r\n log.error(\"Exception {0}\".format(e))\r\n \r\n def __del__(self):\r\n # Close our file before calling superclass, since superclass might delete it\r\n if self.data:\r\n self.data.close()\r\n super(type(self), self).__del__()",
"def write_netcdf(file,xc,xc_bnd,yc,yc_bnd,times,hydrographs,fractions,loc,Flist,velocity,diffusion,NODATA,verbose):\n \n f = Dataset(file,'w', format='NETCDF4')\n\n # set dimensions\n time = f.createDimension('time', None)\n x = f.createDimension('x',xc.shape[1])\n y = f.createDimension('y',xc.shape[0])\n nv4 = f.createDimension('nv4',4)\n\n # initialize variables\n time = f.createVariable('time','f8',('time',))\n xcs = f.createVariable('xc','f8',('y','x',))\n ycs = f.createVariable('yc','f8',('y','x',))\n xc_bnds = f.createVariable('xc_bnds','f8',('y','x','nv4',))\n yc_bnds = f.createVariable('yc_bnds','f8',('y','x','nv4',))\n fraction = f.createVariable('fraction','f8',('y','x',),fill_value=NODATA)\n UHS = f.createVariable('unit_hydrograph','f8',('time','y','x',),fill_value=NODATA)\n\n # write attributes for netcdf\n f.description = 'Aggregated UH_S and Fraction Vars for full RASM domain'\n f.history = 'Created: {}\\n'.format(tm.ctime(tm.time()))\n f.history += ' '.join(sys.argv) + '\\n'\n f.source = sys.argv[0] # prints the name of script used\n f.velocity = velocity\n f.diffusion = diffusion\n f.outlet_lon = loc[0]\n f.outlet_lat = loc[1]\n f.includes = str(len(Flist))+' files'\n\n ycs.long_name = 'latitude of grid cell center'\n ycs.standard_name = 'latitude'\n ycs.units = 'degrees_north'\n ycs._CoordinateAxisType = 'Lat'\n ycs.bounds = 'yc_bnds'\n\n xcs.long_name = 'longitude of grid cell center'\n xcs.standard_name = 'longitude'\n xcs.units = 'degrees_east'\n xcs._CoordinateAxisType = 'Lon'\n xcs.bounds = 'xc_bnds'\n\n time.standard_name = 'time'\n time.units = 'seconds'\n time.description = 'Seconds since initial impulse'\n time.calendar = 'proleptic_gregorian'\n\n UHS.units = 'unitless'\n UHS.description = 'unit hydrograph for each grid cell with respect to basin outlet location'\n \n fraction.units = 'unitless'\n fraction.description = 'fraction of grid cell contributing to guage location'\n\n # write data to variables initialized above\n time[:]= times\n xcs[:,:] = xc\n ycs[:,:] = yc\n xc_bnds[:,:,:] = xc_bnd\n yc_bnds[:,:,:] = yc_bnd\n UHS[:,:,:] = hydrographs\n fraction[:,:]= fractions\n f.close()\n\n return",
"def inputs_netCDF(ID, fname, data):\n\n from netCDF4 import Dataset #, date2num, num2date\n from datetime import datetime\n\n print('**** creating SpaFHy input netCDF4 file: ' + fname + ' ****')\n \n # create dataset & dimensions\n ncf = Dataset(fname, 'w')\n ncf.description = 'SpatialData from : ' + str(ID)\n ncf.history = 'created ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n ncf.source = 'SpaFHy v.1.0 inputs'\n \n dlat, dlon = np.shape(data['cmask'])\n\n ncf.createDimension('dlon', int(dlon))\n ncf.createDimension('dlat', int(dlat))\n ncf.createDimension('scalar', 1)\n\n # create variables \n # call as createVariable(varname,type,(dimensions))\n cellsize = ncf.createVariable('cellsize', 'f4', ('scalar',))\n cellsize.units = 'm'\n lat = ncf.createVariable('lat', 'f4', ('dlat',))\n lat.units = 'ETRS-TM35FIN'\n lon = ncf.createVariable('lon', 'f4', ('dlon',))\n lon.units = 'ETRS-TM35FIN'\n\n cellsize[0] = data['cellsize']\n lon[:] = data['lon0']\n lat[:] = data['lat0']\n \n # required inputs\n cmask = ncf.createVariable('cmask', 'i4', ('dlat','dlon',))\n cmask.units = 'integer inside catchment, Nan outside'\n LAI_conif = ncf.createVariable('LAI_conif', 'f4', ('dlat','dlon',))\n LAI_conif.units = 'conifer LAI (m2m-2)'\n LAI_decid = ncf.createVariable('LAI_decid', 'f4', ('dlat','dlon',))\n LAI_decid.units = 'deciduous annual max LAI (m2m-2)' \n hc = ncf.createVariable('hc', 'f4', ('dlat','dlon',))\n hc.units = 'canopy height m' \n cf = ncf.createVariable('cf', 'f4', ('dlat','dlon',))\n cf.units = 'canopy closure (-)' \n \n soilclass = ncf.createVariable('soilclass', 'i4', ('dlat','dlon',))\n soilclass.units = 'soil class (1 - 5)'\n \n flowacc = ncf.createVariable('flowacc', 'f4', ('dlat','dlon',))\n flowacc.units = 'flow accumualtion area m2'\n slope = ncf.createVariable('slope', 'f4', ('dlat','dlon',))\n slope.units = 'local slope (deg)' \n \n for k in ['LAI_conif', 'LAI_decid', 'hc', 'cf', 'soilclass', 'flowacc', 'slope']:\n ncf[k][:,:] = data[k]\n \n print('**** done ****')",
"def _initialize_output(self, time_len, id_len):\r\n\r\n log('Initializing new file %s' % self.cf_compliant_file, 'INFO')\r\n \r\n self.cf_nc = Dataset(self.cf_compliant_file, 'w', format='NETCDF3_CLASSIC')\r\n \r\n # Create global attributes\r\n log(' globals', 'DEBUG', self.print_debug)\r\n self.cf_nc.featureType = 'timeSeries'\r\n self.cf_nc.Metadata_Conventions = 'Unidata Dataset Discovery v1.0'\r\n self.cf_nc.Conventions = 'CF-1.6'\r\n self.cf_nc.cdm_data_type = 'Station'\r\n self.cf_nc.nodc_template_version = (\r\n 'NODC_NetCDF_TimeSeries_Orthogonal_Template_v1.1')\r\n self.cf_nc.standard_name_vocabulary = ('NetCDF Climate and Forecast (CF) ' +\r\n 'Metadata Convention Standard Name ' +\r\n 'Table v28')\r\n self.cf_nc.title = 'RAPID Result'\r\n self.cf_nc.summary = (\"Results of RAPID river routing simulation. Each river \" +\r\n \"reach (i.e., feature) is represented by a point \" +\r\n \"feature at its midpoint, and is identified by the \" +\r\n \"reach's unique NHDPlus COMID identifier.\")\r\n self.cf_nc.time_coverage_resolution = 'point'\r\n self.cf_nc.geospatial_lat_min = 0.0\r\n self.cf_nc.geospatial_lat_max = 0.0\r\n self.cf_nc.geospatial_lat_units = 'degrees_north'\r\n self.cf_nc.geospatial_lat_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_lon_min = 0.0\r\n self.cf_nc.geospatial_lon_max = 0.0\r\n self.cf_nc.geospatial_lon_units = 'degrees_east'\r\n self.cf_nc.geospatial_lon_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_min = 0.0\r\n self.cf_nc.geospatial_vertical_max = 0.0\r\n self.cf_nc.geospatial_vertical_units = 'm'\r\n self.cf_nc.geospatial_vertical_resolution = 'midpoint of stream feature'\r\n self.cf_nc.geospatial_vertical_positive = 'up'\r\n self.cf_nc.project = self.project_name\r\n self.cf_nc.processing_level = 'Raw simulation result'\r\n self.cf_nc.keywords_vocabulary = ('NASA/Global Change Master Directory ' +\r\n '(GCMD) Earth Science Keywords. 
Version ' +\r\n '8.0.0.0.0')\r\n self.cf_nc.keywords = 'DISCHARGE/FLOW'\r\n self.cf_nc.comment = 'Result time step(s) (seconds): ' + str(self.time_step_array)\r\n \r\n timestamp = datetime.utcnow().isoformat() + 'Z'\r\n self.cf_nc.date_created = timestamp\r\n self.cf_nc.history = (timestamp + '; added time, lat, lon, z, crs variables; ' +\r\n 'added metadata to conform to NODC_NetCDF_TimeSeries_' +\r\n 'Orthogonal_Template_v1.1')\r\n \r\n # Create dimensions\r\n log(' dimming', 'DEBUG', self.print_debug)\r\n self.cf_nc.createDimension('time', time_len)\r\n self.cf_nc.createDimension(self.output_id_dim_name, id_len)\r\n \r\n # Create variables\r\n log(' timeSeries_var', 'DEBUG', self.print_debug)\r\n timeSeries_var = self.cf_nc.createVariable(self.output_id_dim_name, 'i4', \r\n (self.output_id_dim_name,))\r\n timeSeries_var.long_name = (\r\n 'Unique NHDPlus COMID identifier for each river reach feature')\r\n timeSeries_var.cf_role = 'timeseries_id'\r\n \r\n log(' time_var', 'DEBUG', self.print_debug)\r\n time_var = self.cf_nc.createVariable('time', 'i4', ('time',))\r\n time_var.long_name = 'time'\r\n time_var.standard_name = 'time'\r\n time_var.units = 'seconds since 1970-01-01 00:00:00 0:00'\r\n time_var.axis = 'T'\r\n \r\n #only add if user adds\r\n if self.comid_lat_lon_z_file and os.path.exists(self.comid_lat_lon_z_file):\r\n log(' lat_var', 'DEBUG', self.print_debug)\r\n lat_var = self.cf_nc.createVariable('lat', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lat_var.long_name = 'latitude'\r\n lat_var.standard_name = 'latitude'\r\n lat_var.units = 'degrees_north'\r\n lat_var.axis = 'Y'\r\n \r\n log(' lon_var', 'DEBUG', self.print_debug)\r\n lon_var = self.cf_nc.createVariable('lon', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n lon_var.long_name = 'longitude'\r\n lon_var.standard_name = 'longitude'\r\n lon_var.units = 'degrees_east'\r\n lon_var.axis = 'X'\r\n \r\n log(' z_var', 'DEBUG', self.print_debug)\r\n z_var = self.cf_nc.createVariable('z', 'f8', (self.output_id_dim_name,),\r\n fill_value=-9999.0)\r\n z_var.long_name = ('Elevation referenced to the North American ' +\r\n 'Vertical Datum of 1988 (NAVD88)')\r\n z_var.standard_name = 'surface_altitude'\r\n z_var.units = 'm'\r\n z_var.axis = 'Z'\r\n z_var.positive = 'up'\r\n \r\n log(' crs_var', 'DEBUG', self.print_debug)\r\n crs_var = self.cf_nc.createVariable('crs', 'i4')\r\n crs_var.grid_mapping_name = 'latitude_longitude'\r\n crs_var.epsg_code = 'EPSG:4326' # WGS 84\r\n crs_var.semi_major_axis = 6378137.0\r\n crs_var.inverse_flattening = 298.257223563",
"def save_ecco_dataset_to_netcdf(ecco_ds,\n output_dir,\n dataset_name = 'by_variable',\n time_method = 'by_record',\n output_array_precision = np.float32,\n output_freq_code=None):\n\n\n # Create a name of the files if not specified\n # ---------------------------------------------\n if dataset_name =='by_variable':\n # concat all data variables together into a single string\n dataset_name = '_'.join(list(ecco_ds.data_vars))\n\n\n # force load coordinate values in case they are in dask array\n # -----------------------------------------------------------\n for coord in ecco_ds.coords:\n ecco_ds[coord].load()\n\n\n # Define fill values for NaN\n # ---------------------------------------------\n if output_array_precision == np.float32:\n netcdf_fill_value = nc4.default_fillvals['f4']\n\n elif output_array_precision == np.float64:\n netcdf_fill_value = nc4.default_fillvals['f8']\n\n\n # Create NetCDF encoding directives\n # ---------------------------------------------\n print('\\n... creating variable encodings')\n # ... data variable encoding directives\n dv_encoding = dict()\n for dv in ecco_ds.data_vars:\n dv_encoding[dv] = {'zlib':True, \\\n 'complevel':5,\\\n 'shuffle':True,\\\n '_FillValue':netcdf_fill_value}\n\n # ... coordinate encoding directives\n print('\\n... creating coordinate encodings')\n coord_encoding = dict()\n for coord in ecco_ds.coords:\n # set default no fill value for coordinate\n if output_array_precision == np.float32:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float32'}\n elif output_array_precision == np.float64:\n coord_encoding[coord] = {'_FillValue':None, 'dtype':'float64'}\n\n # force 64 bit ints to be 32 bit ints\n if (ecco_ds[coord].values.dtype == np.int32) or \\\n (ecco_ds[coord].values.dtype == np.int64) :\n coord_encoding[coord]['dtype'] ='int32'\n\n # fix encoding of time\n if coord == 'time' or coord == 'time_bnds':\n coord_encoding[coord]['dtype'] ='int32'\n\n if 'units' in ecco_ds[coord].attrs:\n # apply units as encoding for time\n coord_encoding[coord]['units'] = ecco_ds[coord].attrs['units']\n # delete from the attributes list\n del ecco_ds[coord].attrs['units']\n\n elif coord == 'time_step':\n coord_encoding[coord]['dtype'] ='int32'\n\n # ... combined data variable and coordinate encoding directives\n encoding = {**dv_encoding, **coord_encoding}\n\n\n # Create directory for output files\n # ---------------------------------------------\n filepath = output_dir / dataset_name\n\n if not filepath.exists():\n filepath.mkdir(parents=True, exist_ok=True)\n\n\n # Determine output freqency code.\n # ---------------------------------------------\n # user can specify directory or it can be found if the dataset\n # has the 'time_coverage_resolution' global attribute\n if output_freq_code == None:\n if 'time_coverage_resolution' in ecco_ds.attrs:\n\n print('dataset time averaging from metadata')\n time_coverage_resolution = ecco_ds.attrs['time_coverage_resolution']\n if time_coverage_resolution == 'P1M':\n output_freq_code='AVG_MON'\n elif time_coverage_resolution == 'P1D':\n output_freq_code='AVG_DAY'\n elif time_coverage_resolution == 'P0S':\n output_freq_code='SNAP'\n else:\n print('output_freq_code not defined and not available in dataset metadata')\n print('... 
using full record time in filename')\n\n\n # Write records to disk as NetCDF\n # ---------------------------------------------\n # one file per time level\n\n if time_method == 'by_record':\n for time_i, rec_time in enumerate(ecco_ds.time):\n\n cur_ds = ecco_ds.isel(time=time_i)\n\n # cast data variables to desired precision (if necessary)\n #for data_var in cur_ds.data_vars:\n # if cur_ds[data_var].values.dtype != output_array_precision:\n # cur_ds[data_var].values = cur_ds[data_var].astype(output_array_precision)\n\n time_date_info =\\\n make_date_str_from_dt64(cur_ds.time.values, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' + time_date_info['short'] +\\\n '_' + time_date_info['ppp_tttt'] + '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()\n\n # one file per year\n elif time_method == 'by_year':\n unique_years = np.unique(ecco_ds.time.dt.year)\n print(unique_years)\n\n for year in unique_years:\n # pull out only records for this year\n cur_ds = ecco_ds.sel(time=slice(str(year), str(year)))\n\n first_time = cur_ds.time.values[0]\n last_time = cur_ds.time.values[-1]\n\n first_time_date_info =\\\n make_date_str_from_dt64(first_time, output_freq_code)\n\n last_time_date_info =\\\n make_date_str_from_dt64(last_time, output_freq_code)\n\n # sort comments alphabetically\n print('\\n... sorting global attributes')\n cur_ds.attrs = sort_attrs(cur_ds.attrs)\n\n # add one final comment (PODAAC request)\n cur_ds.attrs[\"coordinates_comment\"] = \\\n \"Note: the global 'coordinates' attribute descibes auxillary coordinates.\"\n\n fname = dataset_name + '_' +\\\n first_time_date_info['short'] + '_' +\\\n last_time_date_info['short'] + '_' +\\\n first_time_date_info['ppp_tttt']+ '.nc'\n\n print(fname)\n print(cur_ds)\n netcdf_output_filename = filepath / fname\n\n # SAVE\n print('\\n... saving to netcdf ', netcdf_output_filename)\n cur_ds.to_netcdf(netcdf_output_filename, encoding=encoding)\n cur_ds.close()",
"def runTest(self):\n ncfile = netCDF4.Dataset(URL)\n assert varname in ncfile.variables.keys()\n var = ncfile.variables[varname]\n assert var.shape == varshape\n data = var[:]\n assert_array_almost_equal(data.min(),varmin)\n assert_array_almost_equal(data.max(),varmax)\n ncfile.close()\n # test https support (linked curl lib must built with openssl support)\n ncfile = netCDF4.Dataset(URL_https)\n assert(ncfile['sst'].long_name=='Sea Surface Temperature') \n ncfile.close()",
"def _write_nc(self, FN, data):\n n_points = data['counts'][0] * data['counts'][1] * data['counts'][2]\n from netCDF4 import Dataset\n grid_nc = Dataset(FN, 'w', format='NETCDF4')\n grid_nc.createDimension('one', 1)\n grid_nc.createDimension('n_cartesian', 3)\n grid_nc.createDimension('n_points', n_points)\n grid_nc.createVariable('origin', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('counts', 'i8', ('one', 'n_cartesian'))\n grid_nc.createVariable('spacing', 'f8', ('one', 'n_cartesian'))\n grid_nc.createVariable('vals', 'f8', ('one', 'n_points'), zlib=True)\n for key in data.keys():\n grid_nc.variables[key][:] = data[key]\n grid_nc.close()",
"def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()",
"def execute(self, in_nc, in_weight_table, out_nc, grid_name, conversion_flag, in_time_interval=\"6hr\"): # modified this line CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n\r\n # Validate the netcdf dataset\r\n vars_oi_index = self.dataValidation(in_nc)\r\n \r\n \"\"\"get conversion factor the flag is used to differentiate forecasts converted \r\n to netCDF from GRIB and the original netCDF. They both use the same weight tables\r\n but the original netCDF is in mm whereas the stock GRIB forecasts are in meters.\r\n Set the conversion_flag in the run.py configuration file.\r\n \"\"\"\r\n if conversion_flag: # Line Added CJB 20190218\r\n conversion_factor = 1.0 #Line Modified CJB 20190218\r\n elif grid_name == 'ecmwf_t1279' or grid_name == 'ecmwf_tco639': # Line Modified CJB 20190218\r\n #if grid_name == 'ecmwf_HRES_F' or grid_name == 'ecmwf_ENS_F': # Line Added/Modified CJB 20190108\r\n #new grids in mm instead of m\r\n conversion_factor = 0.001\r\n else: #set the conversion factor to 1 for everything else (data is in m but legacy installations do not have a flag) Line Added CJB 20190218\r\n conversion_factor = 1.0 # Line Added CJB 20190218\r\n # MJS I might consider netCDF4.Dataset.variables['RO'].units\r\n # and upstream correction of the cdo grid conversion units attribute.\r\n # identify if the input netcdf data is the High Resolution data with three different time intervals\r\n id_data = self.dataIdentify(in_nc)\r\n if id_data is None:\r\n raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the netcdf dataset'''\r\n data_in_nc = NET.Dataset(in_nc)\r\n time = data_in_nc.variables['time'][:]\r\n\r\n # Check the size of time variable in the netcdf data\r\n if len(time) == 0: # *** MJS This change seems like it is too loose an error trap; should it account for instances when nc file time var is != in length with id_data lenght?\r\n raise Exception(self.errorMessages[3])\r\n #if len(time) != self.length_time[id_data]:\r\n # raise Exception(self.errorMessages[3])\r\n\r\n ''' Read the weight table '''\r\n print(\"Reading the weight table...\", in_weight_table)\r\n dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],\r\n self.header_wt[3]:[], self.header_wt[4]:[]}\r\n\r\n with open(in_weight_table, \"r\") as csvfile:\r\n reader = csv.reader(csvfile)\r\n count = 0\r\n for row in reader:\r\n if count == 0:\r\n #check number of columns in the weight table\r\n if len(row) < len(self.header_wt):\r\n raise Exception(self.errorMessages[4])\r\n #check header\r\n if row[1:len(self.header_wt)] != self.header_wt[1:]:\r\n raise Exception(self.errorMessages[5])\r\n count += 1\r\n else:\r\n for i in range(len(self.header_wt)):\r\n dict_list[self.header_wt[i]].append(row[i])\r\n count += 1\r\n\r\n ''' Calculate water inflows\r\n as a reminder, the first 91 time steps are T=0 to T=90 and are 1-hourly for HRES\r\n\t\t the next 18 time steps for HRES are T=93 to T=144 at 3-hourly\r\n then the final 16 time steps are T=150 to T=240 at 6-hourly for a total of 125 records\r\n\t\t\tFor ENS, the first 49 time steps are T=0 to T=144 at 3-hourly\r\n\t\t\tthe final 35 time steps are T=150 to T=360 at 6-hourly for a total of 84 records\r\n '''\r\n\t\t\t\r\n print(\"Calculating water inflows...\")\r\n\t\t\r\n ''' \r\n added the next section CJB 20180122 \r\n '''\r\n\r\n\t\t# Get the overall number of time steps\r\n size_time = self.getTimeSize(in_nc) #CJB 20180122\r\n # Determine the 
size of time steps in each group (1-hourly, 3-hourly, and/or 6-hourly)\r\n if id_data == \"HRES1\": # T <= 90 \r\n time_size = (size_time - 1)\r\n elif id_data == \"HRES13\": # 93 <= T <= 144\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - 1)\r\n elif id_data == \"HRES136\": # 150 <= T <= 240\r\n if in_time_interval == \"1hr\":\r\n time_size = self.length_time_opt[\"HighRes-1hr\"]\r\n elif in_time_interval == \"3hr\": # MJS Doesn't seem to be a case used currently, but added just in case later need.\r\n time_size = self.length_time_opt[\"HighRes-3hr-sub\"] # MJS This is HRES136, i.e., if for some reason in ecmwf_rapid_multi a 3 hr is asked for for this case, it should still have the 3hr_sub number of times\r\n elif in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"HighRes-1hr\"] - self.length_time_opt[\"HighRes-3hr-Sub\"] - 1)\r\n elif id_data == \"ENS3\": # T <= 144\r\n time_size = (size_time - 1)\r\n elif id_data == \"ENS36\": # 150 <= T <= 360\r\n if in_time_interval == \"3hr_subset\":\r\n time_size = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n else:\r\n time_size = (size_time - self.length_time_opt[\"LowResFull-3hr-Sub\"] - 1)\r\n else: # id_data == \"ENS6\": # T <= 360 but all 6-hourly\r\n time_size = (size_time - 1)\r\n #else: # something is wrong and need to throw an error message - likely a corrupt forecast file\r\n # raise Exception(self.errorMessages[3])\r\n #''' end of added section CJB 20180122 \r\n #'''\r\n\r\n #if id_data == \"LowRes\":\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #elif id_data == \"Low3HrRes\":\r\n # size_time = self.length_time_opt[\"LowRes-3hr\"]\r\n #elif id_data == \"LowResFull\":\r\n # if in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"LowResFull-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"LowRes-6hr\"]\r\n #else: #HighRes\r\n # if in_time_interval == \"1hr\":\r\n # size_time = self.length_time_opt[\"HighRes-1hr\"]\r\n # elif in_time_interval == \"3hr\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr\"]\r\n # elif in_time_interval == \"3hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-3hr-Sub\"]\r\n # elif in_time_interval == \"6hr_subset\":\r\n # size_time = self.length_time_opt[\"HighRes-6hr-Sub\"]\r\n # else:\r\n # size_time = self.length_time_opt[\"HighRes-6hr\"]\r\n\r\n size_streamID = len(set(dict_list[self.header_wt[0]]))\r\n\r\n # Create output inflow netcdf data\r\n # data_out_nc = NET.Dataset(out_nc, \"w\") # by default format = \"NETCDF4\"\r\n data_out_nc = NET.Dataset(out_nc, \"w\", format = \"NETCDF3_CLASSIC\")\r\n #dim_Time = data_out_nc.createDimension('Time', size_time)\r\n dim_Time = data_out_nc.createDimension('Time', time_size)\r\n dim_RiverID = data_out_nc.createDimension('rivid', size_streamID)\r\n var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4', \r\n ('Time', 'rivid'),\r\n fill_value=0)\r\n \r\n #data_temp = NUM.empty(shape = [size_time, size_streamID])\r\n data_temp = NUM.empty(shape = [time_size, size_streamID])\r\n\r\n lon_ind_all = [int(i) for i in dict_list[self.header_wt[2]]]\r\n lat_ind_all = [int(j) for j in dict_list[self.header_wt[3]]]\r\n\r\n # Obtain a subset of runoff data based on the indices in 
the weight table\r\n min_lon_ind_all = min(lon_ind_all)\r\n max_lon_ind_all = max(lon_ind_all)\r\n min_lat_ind_all = min(lat_ind_all)\r\n max_lat_ind_all = max(lat_ind_all)\r\n\r\n # self.vars_oi[vars_oi_index][3] = RO; get that variable's 3D structure (time, lat_index, lon_index) ready to reshape into 2D (time, lat_index x lon_index)\r\n data_subset_all = data_in_nc.variables[self.vars_oi[vars_oi_index][3]][:, min_lat_ind_all:max_lat_ind_all+1, min_lon_ind_all:max_lon_ind_all+1]\r\n len_time_subset_all = data_subset_all.shape[0]\r\n len_lat_subset_all = data_subset_all.shape[1]\r\n len_lon_subset_all = data_subset_all.shape[2]\r\n data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))\r\n\r\n # compute new indices based on the data_subset_all\r\n index_new = []\r\n for r in range(0,count-1):\r\n ind_lat_orig = lat_ind_all[r]\r\n ind_lon_orig = lon_ind_all[r]\r\n index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))\r\n\r\n # obtain a new subset of data\r\n data_subset_new = data_subset_all[:,index_new]*conversion_factor\r\n\r\n # start compute inflow\r\n pointer = 0\r\n for s in range(0, size_streamID):\r\n npoints = int(dict_list[self.header_wt[4]][pointer])\r\n # Check if all npoints points correspond to the same streamID\r\n if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:\r\n print(\"ROW INDEX {0}\".format(pointer))\r\n print(\"RIVID {0}\".format(dict_list[self.header_wt[0]][pointer]))\r\n raise Exception(self.errorMessages[2])\r\n\r\n area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]\r\n area_sqm_npoints = NUM.array(area_sqm_npoints)\r\n area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)\r\n data_goal = data_subset_new[:, pointer:(pointer + npoints)]\r\n \r\n \r\n #remove noise from data\r\n data_goal[data_goal<=0.00001] = 0\r\n\r\n ''' IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative instead of incremental through time\r\n '''\r\n # For data with Low Resolution, there's only one time interval 6 hrs\r\n if id_data == \"ENS6\": # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints\r\n elif id_data == \"ENS3\": # there's only one time interval 3 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\r\n elif id_data == \"HRES1\": # there's only one time interval 1 hrs # Line Added/Modified CJB 20190108\r\n #ro_stream = data_goal * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:,],data_goal[:-1,]) * area_sqm_npoints # Line Added/Modified CJB 20190108\t\r\n #For data with the full version of Low Resolution, from Hour 0 to 144 (the first 49 time points) are of 3 hr time interval,\r\n # then from Hour 144 to 360 (36 time points) are of 6 hour time interval\r\n elif id_data == \"ENS36\": # Line Added/Modified CJB 20190108\r\n if in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n ro_stream = NUM.subtract(data_goal[1:49,], data_goal[:48,]) * area_sqm_npoints\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[49:,], data_goal[48:-1,]) * area_sqm_npoints\r\n else: #\"LowRes-6hr\"\r\n ######################################################\r\n # MJS 
Always assume this case will have a full ECMWF 240\r\n # hour forecast to work with. It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n #convert all to 6hr\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[2:49:2,], data_goal[:48:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[49:,], data_goal[48:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints\r\n #For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,\r\n # then from Hour 90 to 144 (18 time points) are of 3 hour time interval, and from Hour 144 to 240 (16 time points)\r\n # are of 6 hour time interval\r\n ##########################################################\r\n # MJS The following should handle id_data = HRES13 and HRES136\r\n ##########################################################\r\n else:\r\n if in_time_interval == \"1hr\":\r\n #ro_stream = NUM.subtract(data_goal[1:91,],data_goal[:90,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[1:1+time_size,],data_goal[:time_size,]) * area_sqm_npoints # Line Added/Modified CJB, MJS 20190108\r\n elif in_time_interval == \"3hr\": # MJS HRES 3hr not currently used\r\n # calculate time series of 3 hr data from 1 hr data\r\n ro_3hr_a = NUM.subtract(data_goal[3:91:3,],data_goal[:88:3,])\r\n # get the time series of 3 hr data\r\n #ro_3hr_b = NUM.subtract(data_goal[91:109,], data_goal[90:108,])\r\n ro_3hr_b = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) # MJS modified again; seems no case for this, but just in case later... Line Added/Modified CJB 20190108\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_3hr_a, ro_3hr_b]) * area_sqm_npoints\r\n elif in_time_interval == \"3hr_subset\":\r\n #use only the 3hr time interval\r\n #ro_stream = NUM.subtract(data_goal[91:109,], data_goal[90:108,]) * area_sqm_npoints\r\n ro_stream = NUM.subtract(data_goal[91:91+time_size,], data_goal[90:90+time_size,]) * area_sqm_npoints # MJS modified again; needs to handle HRES13 that might not have complete 3hr set... Line Added/Modified CJB 20190108\r\n elif in_time_interval == \"6hr_subset\":\r\n #use only the 6hr time interval\r\n ro_stream = NUM.subtract(data_goal[109:,], data_goal[108:-1,]) * area_sqm_npoints\r\n ######################################################\r\n # MJS Always assume this case will have a full ECMWF 240 \r\n # hour forecast to work with. 
It's actually never re-\r\n # quested by ecmwf_rapid_multiprocess anyhow.\r\n ######################################################\r\n else: # in_time_interval == \"6hr\"\r\n #arcpy.AddMessage(\"6hr\")\r\n # calculate time series of 6 hr data from 1 hr data\r\n ro_6hr_a = NUM.subtract(data_goal[6:91:6,], data_goal[:85:6,])\r\n # calculate time series of 6 hr data from 3 hr data\r\n ro_6hr_b = NUM.subtract(data_goal[92:109:2,], data_goal[90:107:2,])\r\n # get the time series of 6 hr data\r\n ro_6hr_c = NUM.subtract(data_goal[109:,], data_goal[108:-1,])\r\n # concatenate all time series\r\n ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b, ro_6hr_c]) * area_sqm_npoints\r\n \r\n #remove negative values\r\n ro_stream[ro_stream<0] = 0\r\n data_temp[:,s] = ro_stream.sum(axis = 1)\r\n\r\n pointer += npoints\r\n\r\n\r\n '''Write inflow data'''\r\n print(\"Writing inflow data...\")\r\n var_m3_riv[:] = data_temp\r\n # close the input and output netcdf datasets\r\n data_in_nc.close()\r\n data_out_nc.close()",
"def check_netcdf_file():\n # check the model file and extract necessary information\n # must be in the argument list\n if NETCDF_FILE_NAME is None:\n print('[ERROR] the netCDF model file name is required', flush=True)\n usage_csv()\n sys.exit(1)\n\n # user may provide full path\n elif os.path.isfile(NETCDF_FILE_NAME):\n model_file_name = NETCDF_FILE_NAME\n base_file_name, ext = os.path.splitext(model_file_name)\n\n # user may place it under the data directory\n elif os.path.isfile(os.path.join(DATA_DIR, NETCDF_FILE_NAME)):\n model_file_name = os.path.join(DATA_DIR, NETCDF_FILE_NAME)\n base_file_name, ext = os.path.splitext(model_file_name)\n\n # could not find the file\n else:\n print('[ERROR] could not find the netCDF model file {}'.format(NETCDF_FILE_NAME), flush=True)\n usage_csv()\n sys.exit(1)\n\n return model_file_name, base_file_name",
"def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp",
"def verify_netcdf(extents_dir, out_ncfile):\n netcdf_old=out_ncfile #'/g/data/fk4/wofs/water_f7q/extents/149_-036/LS_WATER_149_-036_1987-05-22T23-08-20.154_2014-03-28T23-47-03.171.nc'\n\n tiles = [make_tileinfo(filename) for filename in glob(os.path.join(extents_dir, '*.tif'))]\n tiles.sort(key=lambda t: t.datetime)\n\n with netCDF4.Dataset(netcdf_old) as nco:\n for i in range(0,len(tiles)):\n print nco['time'][i]\n print tiles[i]\n with rasterio.open(tiles[i].filename) as tile_data:\n print \"Any difference? \" \n print numpy.sum(nco['Data'][:,:,i])\n print numpy.sum(tile_data.read(1))\n\n print type(nco['Data'][:,:,i]), type(tile_data.read(1))\n print nco['Data'][:,:,i].shape, tile_data.read(1).shape\n \n print numpy.sum(nco['Data'][:,:,i] - tile_data.read(1)[:,:])\n #print tile_data.read(1)[0:100,0:100] \n\n #print (nco['Data'][:,:,i] == tile_data.read(1)).all()",
"def is_valid_netcdf_file(nc_data):\n fname = Path(nc_data.filepath()).name\n \n start_str = fname.split(\"_\")[3][1:-1]\n start_fname = dt.datetime.strptime(\n start_str + \" UTC\",\n \"%Y%j%H%M%S %Z\",\n )\n start_fname = start_fname.replace(tzinfo=dt.timezone.utc)\n end_str = fname.split(\"_\")[4][1:-1]\n end_fname = dt.datetime.strptime(end_str + \" UTC\", \"%Y%j%H%M%S %Z\")\n end_fname = end_fname.replace(tzinfo=dt.timezone.utc)\n \n avg_fname = start_fname + (end_fname - start_fname) / 2\n \n vtime = get_valid_time(nc_data)\n if vtime is None:\n return False\n \n diff = (avg_fname - vtime).total_seconds()\n \n if diff > 60:\n return False\n \n return True",
"def write_netcdf(ncinfo):\r\n\t# ========== Create new netcdf ==========\r\n\tNAME=nc.netcdf_file(ncinfo.fname,'w')\r\n\t\r\n\t# ========== Set up the Dimensions ==========\r\n\tNAME.createDimension('time', None) #Question: Shouldn't time be unlimited?\r\n\t# NAME.createDimension('lev',11)\r\n\tNAME.createDimension('lat',ncinfo.lat)\r\n\tNAME.createDimension('lon',ncinfo.lon)\r\n\t\r\n\t# ========== Setup the Variables ==========\r\n\ttime=NAME.createVariable('time',np.float64,('time',))\r\n\t# lev=NAME.createVariable('lev',np.int32,('lev',))\r\n\tlat=NAME.createVariable('lat',np.float64,('lat',))\r\n\tlon=NAME.createVariable('lon',np.float64,('lon',))\r\n\t# VAR=NAME.createVariable(str(VAR),np.float64,('time','lev','lat','lon'),)\r\n\tVAR=NAME.createVariable(ncinfo.var_name,np.float64,('time','lat','lon'),)\r\n\t# setting the missing value is super important for the file to be cdo readable\r\n\tsetattr(VAR,'missing_value',ncinfo.fill)\r\n\tsetattr(VAR, 'standard_name', ncinfo.var_lname) \r\n\t\r\n\t# ========== Set the units ==========\r\n\ttime.units= 'day as %Y%m%d'\r\n\t# lev.units = '-'\r\n\tlat.units = 'degrees_north'\r\n\tlon.units = 'degrees_east'\r\n\tVAR.units = ncinfo.units\r\n\r\n\t# ========== Add data ==========\r\n\t\r\n\t# creates time vector using the date_range function\r\n\t# time[:]=[t for t in date_range('20110101.5','20111231.5')] \r\n\t# lev[:]=PFT_vector\r\n\tlat[:] = ncinfo.latitudes\r\n\tlon[:] = ncinfo.longitudes\r\n\t# THis is a Bodge for singe variable data\r\n\tVAR[:] = ncinfo.data\r\n\r\n\t#Add global attributes\r\n\tNAME.description = ncinfo.description\r\n\tNAME.history = ncinfo.history\r\n\r\n\t# WHATS MISSING\r\n\t# metadata a whole bunch of metadata\r\n\t# the standard_name and long_name of the variables\r\n\r\n\t# ========== Close the netcdf ==========\r\n\tNAME.close()",
"def cl_file(tmp_path):\n nc_path = os.path.join(tmp_path, 'cesm2_waccm_cl.nc')\n dataset = Dataset(nc_path, mode='w')\n dataset.createDimension('lev', size=2)\n dataset.createDimension('bnds', size=2)\n\n # Dimensional variables\n dataset.createVariable('lev', np.float64, dimensions=('lev',))\n dataset.createVariable('lev_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['lev'][:] = [1.0, 2.0]\n dataset.variables['lev'].bounds = 'lev_bnds'\n dataset.variables['lev'].units = '1'\n dataset.variables['lev_bnds'][:] = [[0.5, 1.5], [1.5, 3.0]]\n dataset.variables['lev_bnds'].standard_name = (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n dataset.variables['lev_bnds'].units = '1'\n dataset.variables['lev_bnds'].formula_terms = (\n 'p0: p0 a: a_bnds b: b_bnds ps: ps')\n\n # Coordinates for derivation of pressure coordinate\n dataset.createVariable('a', np.float64, dimensions=('lev',))\n dataset.createVariable('a_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.createVariable('b', np.float64, dimensions=('lev',))\n dataset.createVariable('b_bnds', np.float64, dimensions=('lev', 'bnds'))\n dataset.variables['a'][:] = [1.0, 2.0]\n dataset.variables['a'].bounds = 'a_bnds'\n dataset.variables['a_bnds'][:] = [[1.5, 0.0], [3.0, 1.5]]\n dataset.variables['b'][:] = [0.0, 1.0]\n dataset.variables['b'].bounds = 'b_bnds'\n dataset.variables['b_bnds'][:] = [[0.5, -1.0], [2.0, 0.5]]\n\n dataset.close()\n return nc_path",
"def repair_netcdf(fname):\n\n\t# ========== Set the path and the file name ==========\n\t# fname = \"%s_%s_%s_r1i1p1_%s_1950_2050_%s_regrid.nc\" %(var, model, sen, units, sen)\n\tfout = \"%s_setgrid\" % (fname)\n\n\t\n\t# ========== Create a list of files to cleanup ==========\n\tcleanup = []\n\n\t# ========== Check if the file exists ==========\n\tif not os.path.isfile(fname+\".nc\"):\n\t\t# check if the file exists with a different name\n\t\traise IOError(\"WARNING: The file %s cannot be found\"% fname)\n\n\t\n\t# ========== Read longitude from NC file ==========\n\tfh = Dataset(fname+\".nc\", mode='r')\n\ttry:\n\t\tlon = fh.variables['longitude'][:]\n\texcept:\n\t\ttry:\n\t\t\tlon = fh.variables['lon'][:]\n\t\texcept:\n\t\t\tlon = fh.variables['easting'][:] #easting\n\n\n\n\n\t# ========== Create a new grid ==========\n\t# Save the current grid\n\tsubp.call(\"cdo griddes %s.nc > %sGriddes\" % (fname, fname), shell=True)\n\t# add the griddes to the cleanup \n\tcleanup.append(\"%sGriddes\" % fname)\n\n\t# open the current grid\n\tgfile = open(\"%sGriddes\" % fname, \"r\") \n\t# Split the lines of the grid file\n\tginfo = gfile.read().splitlines()\n\t\n\t#Some models have no lat/lon bounds, skip in this case and copy\n\t#\"regrid\" file as \"setgrid\"\n\tif not (any([n.startswith(\"xbounds\") for n in ginfo]) and \n\t\t any([n.startswith(\"ybounds\") for n in ginfo])):\n\t\tsubp.call(\"cp %s.nc %s.nc\" % (fname, fout), shell=True)\n\t\tcleanup.append(\"%s.nc\" % fname)\n\t\treturn cleanup\t\n\t\n\t# Check and see if the start is known\n\tif (\n\t\tany([n.startswith(\"xfirst\") for n in ginfo])\n\t\t) and (\n\t\tany([n.startswith(\"xinc\") for n in ginfo])\n\t\t):\n\t\taddxdet = False\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\"]\n\telse:\n\t\taddxdet = True\n\t\t# Set the lines to be removed\n\t\tbadel = [\"xvals\", \"yvals\", \" \", \"xbounds\", \"ybounds\", \"xfirst\", \"xinc\"]\n\n\t# Create list to hold the new grid details\n\tnew_grid = []\n\n\tfor ginf in ginfo:\n\t\ttest = []\n\t\tfor be in badel:\n\t\t\tif ginf.startswith(be):\n\t\t\t\ttest.append(False)\n\t\t\telif ginf == \"#\":\n\t\t\t\ttest.append(False)\n\t\t\telse:\n\t\t\t\ttest.append(True)\n\t\t\n\t\tif all(test):\n\t\t\tnew_grid.append(ginf)\n\t# Add the additional x variables\n\tif addxdet:\n\t\t# work out the model from the fname\n\t\tmodel = fname.split(\"/\")[-2]\n\t\tnew_grid.append('xfirst = -180')\n\t\tnew_grid.append('xinc = %s' % str(\n\t\t\tfloat(lon) ))\n\t\n\n\t# Check the y values, if they are missing use the ones in the original grid file\n\tif not (any([n.startswith(\"yfirst\") for n in ginfo])):\n\t\t# print (\"Seting the y bounds\")\n\t\tvals = []\n\t\tfor glov in range(0,len(ginfo)):\n\t\t\tif ginfo[glov].startswith(\"yvals\"):\n\t\t\t\tvals.append(glov)\n\t\t\telif ginfo[glov].startswith(\"ybounds\"):\n\t\t\t\tvals.append(glov)\n\t\tif len (vals) == 2:\n\t\t\tfor yv in ginfo[vals[0]:vals[1]]:\n\t\t\t\tnew_grid.append(yv)\n\n\t\telse:\n\t\t\tprint(\"\\n\")\n\t\t\traise IndexError(\"Bounding is incorrect\")\n\n\t# Save the grid out\n\tnewgrid = save_grid(fname, new_grid)\n\tcleanup.append(newgrid)\n\n\t# ========== Set the new grid file ==========\n\t# Save the current grid\n\tsubp.call(\"cdo setgrid,%sGridFix %s.nc %s.nc\" % (fname, fname, fout), shell=True)\n\t\n\tif not os.path.isfile(\"%s.nc\" % fout):\n\t\traise IOError(\"The output file was not created, going interactive\")\n\t\n\t# ========== return the files to be removed 
==========\n\tcleanup.append(\"%s.nc\" % fname)\n\treturn cleanup",
"def testCC(self):\n self.assertEqual(\n self.cc,\n self.ccr.cc\n )\n\n self.assertEqual(\n None,\n self.ccr_bad.cc\n )",
"def _validate_raw_nc(self):\r\n\r\n self.raw_nc_list = []\r\n total_time_len = 1 #add one for the first flow value RAPID\r\n #does not include\r\n id_len_list = []\r\n for rapid_output_file in self.rapid_output_file_list:\r\n qout_nc = RAPIDDataset(rapid_output_file)\r\n id_len_list.append(qout_nc.size_river_id)\r\n total_time_len += qout_nc.size_time\r\n self.raw_nc_list.append(qout_nc)\r\n \r\n #make sure river id lists are the same\r\n for id_len_undex in range(1, len(id_len_list)):\r\n if id_len_list[id_len_undex] != id_len_list[0]:\r\n raise Exception(\"ERROR: River ID size is different in one of the files ...\")\r\n \r\n for raw_nc_index in range(1, len(self.raw_nc_list)):\r\n if not (self.raw_nc_list[raw_nc_index].get_river_id_array() == self.raw_nc_list[0].get_river_id_array()).all():\r\n raise Exception(\"ERROR: River IDs are different in files ...\")\r\n\r\n return id_len_list[0], total_time_len",
"def processData(self,data):\n #print 'I GOT DATA',data,[0],data[1]\n # Check for valid data (not null or empty string)\n #print '**************NOTIFICATION***************',type(_RobotCommunicator.WALL_HEADER),type(data[0])\n if data:\n #print '**************NOTIFICATION***************',type(_RobotCommunicator.WALL_HEADER),type(data[0]),_RobotCommunicator.WALL_HEADER==data[0]\n\n # Check header and assign data appropriately\n # TODO: Check length of data for validity\n #print 'Header',data[0]\n if data[0] == _RobotCommunicator.POSE_HEADER:\n self.pose = unpack(_RobotCommunicator.POSE_FORMAT,data[1:])\n elif data[0] == _RobotCommunicator.SENSOR_HEADER:\n\n #for i in range(1, len(data)-1, 2):\n index= unpack('B',data[1])\n value = unpack('?',data[2])\n # Update old values or create new sensor-value pair\n self.sensors[index[0]] = value[0]\n #print 'in csharp: ',[index,value]\n\n elif data[0] == _RobotCommunicator.WAYPOINT_HEADER:\n self.waypoints = [] # Clear old waypoints\n for i in range(1, len(data)-16, 16):\n x,y = unpack(_RobotCommunicator.WAYPOINT_FORMAT,\n data[i:i+15])\n self.waypoints.append((x,y))\n elif data[0] == _RobotCommunicator.DIRECTION_HEADER:\n self.direction = unpack(_RobotCommunicator.DIRECTION_FORMAT,\n data[1:])\n elif data[0] == _RobotCommunicator.ACTUATOR_HEADER:\n self.actuators = [] # Clear old actuator commands for i in range(1, len(data)-1):\n self.actuators.append(unpack(\n _RobotCommunicator.ACTUATOR_FORMAT,data[i]))\n elif data[0] == _RobotCommunicator.WALL_HEADER:\n self.walls = {} # Clear old wall entries\n index = unpack('B', data[1])\n x1,y1,x2,y2 = unpack(_RobotCommunicator.WALL_FORMAT,data[2:34])\n self.walls = (x1,y1,x2,y2)\n #print '**************Coordinates***************',(x1,y1,x2,y2)\n print '****self.walls*********',self.walls\n elif data[0] == _RobotCommunicator.OBS_HEADER:\n index = unpack('B', data[1])\n add,x1,y1 = unpack(_RobotCommunicator.OBS_FORMAT,data[2:26])\n #print '***********self.obs*************'+','.join(map(str,[add,x1,y1]))\n self.obs = [add,x1,round(y1,2)]\n if add == 1:\n a = PolyShapes.Rectangle(self.resolX,self.resolY)\n a.shift(x1,y1)\n self.obsPoly += a\n self.receiveObs = True\n #print \"add obstacle:\" + str(x1) + \",\"+ str(y1)\n elif add == 4:\n if x1 == 0:\n self.STOP = True\n else:\n self.STOP = False\n else:\n a = PolyShapes.Rectangle(self.resolX,self.resolY)\n a.shift(x1,y1)\n self.obsPoly -= a\n self.receiveObs = True\n #print \"del obstacle:\"+ str(x1) + \",\"+ str(y1)\n\n\n else:\n print \"Unexpected or corrupted data packet received.\"",
"def Save2Nc(self):\r\n\r\n frameNumber = self.spinBox_FrameNum.value()\r\n\r\n segmentNumber = self.spinBox_SegmentNum.value()\r\n\r\n exposeTime = self.spinBox_ExpTime.value()\r\n width = self.spinBox_Width.value()\r\n xshift = self.spinBox_XShift.value()\r\n hight = self.spinBox_Hight.value()\r\n yshift = self.spinBox_Yshift.value()\r\n\r\n print(\"frameNumber, segmentNumber, width, high is: \", frameNumber, segmentNumber, width, hight)\r\n app = ReadData(noteObj = self.textBrowser_SetMeasureInf, frameNumber=frameNumber, segmentFrame=segmentNumber, width=width, hight=hight)\r\n self.multiFrameData = app.ImageData()\r\n\r\n options = QFileDialog.Options()\r\n options |= QFileDialog.DontUseNativeDialog\r\n # it just provides the name of file that you want to write into\r\n fileName, _= QFileDialog.getSaveFileName(self,\"QFileDialog.getSaveFileName()\",\"\",\"All Files (*);;NC Files (*.nc)\", options=options)\r\n \r\n if fileName:\r\n print(fileName)\r\n\r\n self.multiFrameData.to_netcdf(fileName + '.nc')\r\n self.textBrowser_SetMeasureInf.setTextColor(QtCore.Qt.green)\r\n self.textBrowser_SetMeasureInf.append(\"the data has saved as .nc file! \")",
"def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()",
"def test_noDuplicateCTCPDispatch(self):\n\n def testCTCP(user, channel, data):\n self.called += 1\n\n self.called = 0\n self.client.ctcpQuery_TESTTHIS = testCTCP\n\n self.client.irc_PRIVMSG(\n \"foo!bar@baz.quux\",\n [\"#chan\", \"{X}TESTTHIS{X}foo{X}TESTTHIS{X}\".format(X=irc.X_DELIM)],\n )\n self.assertEqualBufferValue(self.file.getvalue(), \"\")\n self.assertEqual(self.called, 1)"
] | [
"0.60441834",
"0.5924442",
"0.5870361",
"0.56942517",
"0.5658238",
"0.56382203",
"0.5624735",
"0.5600587",
"0.5563595",
"0.55417067",
"0.54565823",
"0.5366081",
"0.5311197",
"0.53014123",
"0.5301123",
"0.52937174",
"0.5290408",
"0.5289225",
"0.52625626",
"0.52489024",
"0.5247772",
"0.5214412",
"0.52054906",
"0.518749",
"0.51700234",
"0.5149499",
"0.5134563",
"0.5132516",
"0.5125593",
"0.51050097"
] | 0.74189216 | 0 |
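The record that follows pairs a natural-language query about clearsky-ratio handling with a test that asserts the defining relationship clearsky_ratio = ghi / clearsky_ghi and then cross-checks grid cells against NSRDB sites via a KDTree lookup. Below is a minimal, self-contained sketch of those two checks using only NumPy/SciPy and synthetic arrays; every name here is illustrative and does not reflect the data handler API used in the record.

```python
import numpy as np
from scipy.spatial import KDTree

# Synthetic hourly all-sky and clearsky irradiance on a small (lat, lon, time) grid.
rng = np.random.default_rng(0)
ghi = rng.uniform(100, 800, size=(4, 4, 48))
cs_ghi = ghi + rng.uniform(1, 200, size=ghi.shape)  # clearsky GHI always exceeds all-sky GHI here

# Defining relationship checked by the test: ratio stays strictly inside (0, 1) when ghi < cs_ghi.
cs_ratio = ghi / cs_ghi
assert 0 < cs_ratio.min() and cs_ratio.max() < 1
assert (ghi < cs_ghi).all()

# Nearest-neighbour lookup of a grid-cell coordinate against tabulated site coordinates,
# analogous to querying an NSRDB meta table with a KDTree.
lats = rng.uniform(39.0, 40.0, size=100)        # hypothetical site latitudes
lons = rng.uniform(-105.5, -104.5, size=100)    # hypothetical site longitudes
site_coords = np.column_stack([lats, lons])
tree = KDTree(site_coords)

cell_coord = site_coords[7] + 0.001  # pretend this is a grid-cell centre near site 7
_, nearest = tree.query(cell_coord)
print("nearest site index:", nearest)
```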
Test solar data handling from CC data file with clearsky ratio calculated using clearsky ratio from NSRDB h5 file. | def test_solar_cc():
    features = ['clearsky_ratio', 'rsds', 'clearsky_ghi']
    input_files = [os.path.join(TEST_DATA_DIR, 'rsds_test.nc')]
    nsrdb_source_fp = os.path.join(TEST_DATA_DIR, 'test_nsrdb_co_2018.h5')
    with xr.open_mfdataset(input_files) as fh:
        min_lat = np.min(fh.lat.values)
        min_lon = np.min(fh.lon.values) - 360
        target = (min_lat, min_lon)
        shape = (len(fh.lat.values), len(fh.lon.values))
    with pytest.raises(AssertionError):
        handler = DataHandlerNCforCC(input_files, features=features,
                                     target=target, shape=shape,
                                     val_split=0.0,
                                     worker_kwargs=dict(max_workers=1))
    handler = DataHandlerNCforCC(input_files, features=features,
                                 nsrdb_source_fp=nsrdb_source_fp,
                                 target=target, shape=shape,
                                 temporal_slice=slice(0, 1),
                                 val_split=0.0,
                                 worker_kwargs=dict(max_workers=1))
    cs_ratio = handler.data[..., 0]
    ghi = handler.data[..., 1]
    cs_ghi = handler.data[..., 2]
    cs_ratio_truth = ghi / cs_ghi
    assert cs_ratio.max() < 1
    assert cs_ratio.min() > 0
    assert (ghi < cs_ghi).all()
    assert np.allclose(cs_ratio, cs_ratio_truth)
    with Resource(nsrdb_source_fp) as res:
        meta = res.meta
        tree = KDTree(meta[['latitude', 'longitude']])
        cs_ghi_true = res['clearsky_ghi']
    # check a few sites against NSRDB source file
    for i in range(4):
        for j in range(4):
            test_coord = handler.lat_lon[i, j]
            _, inn = tree.query(test_coord)
            assert np.allclose(cs_ghi_true[0:48, inn].mean(), cs_ghi[i, j]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n regexham = r'\\s+\\((\\d+,\\s*\\d+)\\)\\s+([\\-+]?\\d+\\.\\d+[eEdD]?[\\-+]?\\d+)' #to extract the Hamiltonian.\n root = '.'\n #fname = 'output_files/'\n ciffci = CIFlow_Reader('testfci.dat', regexp = regexham , read_ham= True)\n ciffcipar = CIFlow_Reader( 'psi0_output10outputfci.dat', regexp = regexham , read_ham = True)\n #print ciffci.calc_overlap(cifdoci)\n #print e.get_groundstate('00000000000011|00000000000011') \n\n psir = rp.PsiReader('psi0_output10.dat', isbig = False, numorbs = -1 , read_ints = False)\n\n detlist = dw.cimain(psir.values['nalpha'],psir.values['nbeta'], psir.values['norb'], [range(1,psir.values['nalpha']+psir.values['nbeta']), []], [] , fname = 'determinants.dat' ,ref = [lambda x , y , z : psir.get_hf_orbs()] , add_frozen = 0, write = False) #CISDDOCI\n count = 0\n for det in detlist:\n for det2 in detlist:\n #+ because the eigenvectors have already a different phasefactor of 1.\n if abs(ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) - ciffcipar.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) ) > 1e-10 :\n print 'difference in hamiltonian row: ' , det[0]+'|'+det[1] , \" col: \" , det2[0]+'|'+det2[1] , 'fci: ', ciffci.get_mat_element(det[0]+'|'+det[1], det2[0]+'|'+det2[1]) , 'fciaddres: ' , ciffcipar.get_mat_element(det[0]+'|'+det[1],det2[0]+'|'+det2[1]) \n count += 1\n print 'There were ' , count , ' different elements'",
"def load_data():\n\t\t# load the data\n\t\tDATPATH = \"../data/\"\n\t\t#fnino = DATPATH + \"nino3.csv\" # 1871-2000\n\t\tfnino = DATPATH + \"tas_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_nino3_tseries.csv\" # 1871-2016\n\t\t#fnino = DATPATH + \"nino34.long.data\"\n\t\t#nc_data_nino3 = netCDF4.Dataset(fnino)\n\t\t#nino3_load = nc_data_nino3.variables['tas'][:]\n\t\t#dnino = nino3_load.flatten()\n\n\t\tdnino = np.genfromtxt(fnino, delimiter=\",\", dtype=float).flatten()\n\t\t#fismr = DATPATH + \"ismr.csv\" # 1871-2000\n\t\t#fismr = DATPATH + \"psl_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_1_india_goswami_2002_tseries.csv\" # 1871-2016\n\t\tfismr = DATPATH + \"pr_Amon_IPSL-CM5A-LR_past1000_r1i1p1_0850_1850_goswami_india_tseries.csv\" # 1871-2016\n\t\tdismr = np.genfromtxt(fismr, delimiter=\",\", dtype=float).flatten()\n\t\t#fvolc = DATPATH + \"robock.txt\" # 1871-2000\n\t\tfvolc = DATPATH + \"sigl.txt\" # 1871-2016\n\t\tdvolc = np.genfromtxt(fvolc, delimiter=\",\", dtype=float).flatten()\n\n\t\tfvolc_source = DATPATH + \"volc_source_850_1850.csv\" # 1871-2016\n\t\tdvolc_source = np.genfromtxt(fvolc_source, delimiter=\",\", dtype=float).flatten()\n\t\t# simple check for data consistency\n\t\tassert dnino.shape == dismr.shape, \"Data sets are unequal!\"\n\t\tassert int(dismr.shape[0]/12) == dvolc.shape[0], \"Data sets are unequal\"\n\t\treturn dnino, dismr, dvolc, dvolc_source",
"def runTest(self):\n ncfile = netCDF4.Dataset(URL)\n assert varname in ncfile.variables.keys()\n var = ncfile.variables[varname]\n assert var.shape == varshape\n data = var[:]\n assert_array_almost_equal(data.min(),varmin)\n assert_array_almost_equal(data.max(),varmax)\n ncfile.close()\n # test https support (linked curl lib must built with openssl support)\n ncfile = netCDF4.Dataset(URL_https)\n assert(ncfile['sst'].long_name=='Sea Surface Temperature') \n ncfile.close()",
"def runTest(self):\n nc = Dataset(self.file)\n data = nc['vl'][-1]\n # check max error of compression\n err = np.abs(data - self.data)\n assert(err.max() < nc['vl'].scale_factor)\n # turn off auto-scaling\n nc.set_auto_maskandscale(False)\n data = nc['vl'][-1]\n assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))\n nc.close()",
"def nc_to_hdf5_mudis(dataset, config):\n np.warnings.filterwarnings('ignore')\n\n date = datetime.datetime.strptime(dataset.recorddate,\n '%d.%m.%Y') # read date from dateset\n date_name = datetime.datetime.strftime(date,\n '%Y%m%d') # convert date to YYYYMMDD format\n config['date'] = date_name\n\n # Create the directory to save the results\n path = config['str_dir'] + '/radiance/{}/data/'.format(config['date'])\n os.makedirs(os.path.dirname(path), exist_ok=True)\n\n # Read time of the file (correct time)\n time = datetime.datetime.strptime(dataset.recordtime, '%H:%M:%S.')\n time = datetime.datetime.time(time)\n\n # convert time to datetime format\n datetime_name = datetime.datetime.combine(date, time)\n new_name = datetime.datetime.strftime(datetime_name, '%Y%m%d_%H%M%S')\n\n # radiance = dataset.variables['data'][:].reshape(113, 1281)\n # wavelength_axis = dataset.variables['xAxis'][:]\n\n # Create a file in the disk\n with h5py.File(config['str_dir'] + '/radiance/{}/data/{}.h5'.format(\n config['date'], new_name), 'w') as datos:\n\n if not list(datos.items()):\n # Create two datasets(use only one time)\n datos.create_dataset('/data',\n data=dataset['data'][:].reshape(113, 1281),\n dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n else:\n del datos['data']\n # del datos['skymap']\n print('data deleted and corrected')\n datos.create_dataset('/data', data=data, dtype='f4')\n # datos.create_dataset('/skymap', data=skymap, dtype='f4')\n\n # Add attributes to datasets\n datos['data'].attrs['time'] = str(time)\n datos['data'].attrs['Exposure'] = dataset.exposuretime\n datos['data'].attrs['NumAver'] = dataset.AVERAGED\n datos['data'].attrs['CCDTemp'] = dataset.detectortemperature\n datos['data'].attrs['NumSingMes'] = dataset.noofaccumulations\n # datos['data'].attrs['ElectrTemp'] = dataset.\n datos['data'].attrs['Latitude'] = '52.39N'\n datos['data'].attrs['Longitude'] = '9.7E'\n datos['data'].attrs['Altitude'] = '65 AMSL'\n\n chn = np.arange(1, 114)\n datos.create_dataset('/channel', data=chn, dtype=np.float32)\n datos.create_dataset('/wavelength', data=dataset['xAxis'][:])\n\n datos['data'].dims.create_scale(datos['channel'], 'channel')\n datos['data'].dims[0].attach_scale(datos['channel'])\n datos['data'].dims[0].label = 'channel'\n datos['data'].dims[1].label = 'wavelength'\n\n # datos['skymap'].dims[0].label = 'channel'\n # datos['skymap'].dims[1].label = 'Azimuth, Zenith'\n\n datos.close()",
"def extract_hrc_data(obsid, data_dir):\n#\n#--- extract fits data\n#\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=hrc\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n#\n#--- create directories and move the data into them\n#\n cmd = 'mkdir primary secondary'\n os.system(cmd)\n \n cmd = 'mv *dtf1*fits* *fov*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'mv *bpix1*fits* *evt1*fits* *msk1*fits* *mtl1*fits* \\\n *std_dtfstat1.fits* *std_flt1.fits* ./secondary/.'\n os.system(cmd)\n\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'level=1\\n'\n line = line + 'detector=pcad\\n'\n line = line + 'subdetector=aca\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n\n with open('zline', 'w') as fo:\n fo.write(line)\n\n cmd = ' /proj/sot/ska/bin/arc5gl -user isobe -script zline > zout'\n os.system(cmd)\n cmd = 'mv *asol*fits* ./primary/.'\n os.system(cmd)\n\n cmd = 'rm -rf *fits* zline zout'\n os.system(cmd)\n\n hdir = data_dir + '/' + str(obsid)\n if os.path.isdir(hdir):\n cmd = 'rm -rf ' + hdir + '/*'\n os.system(cmd)\n else:\n cmd = 'mkdir ' + hdir \n os.system(cmd)\n\n cmd = 'chmod 774 primary/* secondary/*'\n os.system(cmd)\n\n#\n#--- check whether there are duplicated fits files extracted; if so, remove older ones\n#\n h_list = ['dtf1', 'fov1', 'asol1']\n sdir = 'primary'\n remove_duplicate(h_list, sdir)\n\n h_list = ['bpix1', 'evt1', 'msk1', 'mtl1', 'std_dtfstat1', 'std_flt1']\n sdir = 'secondary'\n remove_duplicate(h_list, sdir)\n\n cmd = 'mv primary secondary ' + hdir + '/.'\n os.system(cmd)\n\n cmd = 'rm -rf ' + hdir + '/analysis/* ' \n os.system(cmd)\n\n return check_data_exist(hdir)",
"def main_bf_MISR(h5f, output_folder, SPATIAL_RESOLUTION=0.5, VZA_MAX=18, CAMERA='AN'):\n\n # =============================================================================\n # 1. Initialization\n # calculate constant parameters\n # initialize output arrays and output hdf5 file\n # check the number of CERES granules \n # =============================================================================\n\n print(\"-------MISR----->\", h5f)\n print(\"-------FID------<>\", h5f.fid)\n print(\"---->\", type(h5f))\n if type(h5f.fid) is str:\n output_nc_name = h5f.fid.split('/')[-1].replace('TERRA_BF_L1B', 'CLIMARBLE')\n else:\n output_nc_name = h5f.fid.name. \\\n decode(\"utf-8\").split('/')[-1]. \\\n replace('TERRA_BF_L1B', 'CLIMARBLE')\n\n output_nc_name = output_nc_name.replace('.h5', '.nc')\n\n # \n NUM_POINTS = 1 / SPATIAL_RESOLUTION\n NUM_LATS = int(180 / SPATIAL_RESOLUTION)\n NUM_LONS = int(360 / SPATIAL_RESOLUTION)\n\n LAT_EDGES = np.arange(-90.0, 90.0001, SPATIAL_RESOLUTION)\n LON_EDGES = np.arange(-180.0, 180.0001, SPATIAL_RESOLUTION)\n\n # \n orbit_radiance_sum = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_radiance_num = np.zeros((NUM_LATS, NUM_LONS, 4))\n orbit_nc_out = os.path.join(output_folder, output_nc_name)\n\n\n # =============================================================================\n # 2. Main processing\n # Loop through each CERES granule and sort radiances into the corresponding lat/lon bins\n # When encounters an asceding granule, script will move to the next granule\n # =============================================================================\n\n # USE MODIS granules to match first and last time of the descending node\n MISR_blocks = get_descending(h5f, 'MISR.{}'.format(CAMERA))\n if MISR_blocks[0] == 0:\n print(\">> IOError( no available MODIS granule in orbit {} )\".format(bf_file))\n return\n\n # LOAD lat/lon here\n lat = h5f['MISR/Geolocation/GeoLatitude'][:]\n lon = h5f['MISR/Geolocation/GeoLongitude'][:]\n\n # LOAD radiance here\n MISR_bands = ['Blue', 'Green', 'Red', 'NIR']\n rads_all = []\n for iband in MISR_bands:\n rads_all.append(h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, iband)][:])\n\n # SPECIFY data dimension to interpolate SZA/VZA\n rad_shape = (128, 512)\n \n\n # LOOP through MISR blocks (starts from 0)\n for iblk in MISR_blocks:\n\n # INTERPOLATE sza and vza (this part can be replaced by a more accurate function)\n raw_sza = h5f['MISR/Solar_Geometry/SolarZenith'][iblk]\n raw_vza = h5f['MISR/{}/Sensor_Geometry/{}Zenith'.format(CAMERA, ''.join(c.lower() if i==1 else c for i,c in enumerate(CAMERA)))][iblk]\n np.place(raw_sza, raw_sza<0, np.nan)\n np.place(raw_vza, raw_vza<0, np.nan)\n blk_sza = resize(raw_sza, rad_shape)\n blk_vza = resize(raw_vza, rad_shape)\n\n\n # SELECT lat/lon\n idx_geometry = np.where((blk_sza<89.0) & (blk_vza<VZA_MAX))\n select_lat = lat[iblk][idx_geometry]\n select_lon = lon[iblk][idx_geometry]\n\n\n # SELECT spectral radiances here\n # Aggregate 275-m res data to 1.1-km when necessary\n # Separate band by band to allow one (or more) band(s) failure\n for iband, band_name in enumerate(MISR_bands, start=0):\n blk_rad = rads_all[iband][iblk]\n # blk_rad = h5f['MISR/{}/Data_Fields/{}_Radiance'.format(CAMERA, band_name)][iblk]\n\n if blk_rad.shape == (512, 2048): \n # 275-m res band\n np.place(blk_rad, blk_rad<0, np.nan)\n fnl_blk_rad = np.nanmean(np.reshape(blk_rad, (blk_rad.shape[0]//4, 4, blk_rad.shape[1]//4,4)), axis=(1,3))\n else:\n fnl_blk_rad = blk_rad\n\n\n select_rad = np.nan_to_num(fnl_blk_rad[idx_geometry])\n 
fnl_idx = np.where((select_rad>0)&(select_rad<1000))[0]\n\n fnl_lat = select_lat[fnl_idx] * -1\n fnl_lon = select_lon[fnl_idx]\n fnl_rad = select_rad[fnl_idx]\n\n try:\n rad_sum, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='sum')\n rad_cnt, binedges, bin_numbers = binned_statistic_dd((fnl_lat, fnl_lon), fnl_rad, bins=[LAT_EDGES, LON_EDGES], statistic='count')\n\n orbit_radiance_sum[:, :, iband] += rad_sum\n orbit_radiance_num[:, :, iband] += rad_cnt\n except ValueError:\n continue\n\n # =============================================================================\n # 3. Save results\n # =============================================================================\n orbit_radiance_num = np.array(orbit_radiance_num, dtype='int16')\n\n coords_lats = np.linspace(90-SPATIAL_RESOLUTION/2, -90+SPATIAL_RESOLUTION/2, NUM_LATS)\n coords_lons = np.linspace(-180+SPATIAL_RESOLUTION/2, 180-SPATIAL_RESOLUTION/2, NUM_LONS)\n\n xr_rad_sum = xr.DataArray(orbit_radiance_sum, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_num = xr.DataArray(orbit_radiance_num, coords=[('latitude', coords_lats), ('longitude', coords_lons), ('misr_channel', range(4))])\n xr_rad_sum.encoding['_FillValue'] = 0\n xr_rad_num.encoding['_FillValue'] = 0\n xr_rad_sum.name = 'MISR spec rad sum'\n xr_rad_num.name = 'MISR spec rad num'\n xr_rad_sum.to_netcdf(orbit_nc_out, 'a')\n xr_rad_num.to_netcdf(orbit_nc_out, 'a')\n return orbit_nc_out",
"def test_CFCalculation_hdf_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2, m=0, spin_up=-571.68845386399, spin_down=-558.2336974657351, unit='K', convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-34.982539807305045,\n spin_down=-21.850435868549834,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=3.8503494779930776, spin_down=2.168215129491561, unit='K',\n convention='Stevens'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=110.50156137060345,\n spin_down=85.58558990378205,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=6, spin_up=110.50156137060345, spin_down=85.58558990378205, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration()\n\n assert results == expected_results",
"def read_test_data_file(self, filename):\n if not os.path.isfile(filename):\n raise Error(\"Test data file does not exist or problem with file: \" + filename)\n txt = open(filename, 'r').readlines()\n search_params = {}\n for curlinenum, curline in enumerate(txt):\n if curline.startswith('#END'): # end of search parameters\n break\n search_params[curline.split('=')[0].replace('#', '').strip()] = {\n 'value': curline.split('=')[1].split('/', 1)[0].strip(),\n 'comment': curline.split('/', 1)[1].strip()}\n while not txt[curlinenum].startswith('#1'):\n curlinenum += 1\n hdrline1 = txt[curlinenum]\n while not txt[curlinenum].startswith('#2'):\n curlinenum += 1\n hdrline2 = txt[curlinenum]\n headers1 = [a.strip() for a in hdrline1[2:].split('|')]\n headers2 = [a.strip() for a in hdrline2[2:].split('|')]\n headers = [headers1[i] + ' ' + headers2[i] for i in range(len(headers1))][:-1]\n headers = [a.replace(' mag', 'mag') for a in headers]\n split_at = [a.start() for a in re.finditer('\\|', hdrline1)][:-1]\n split_row = lambda row: [row[i:j].replace('\\n', '') for i, j in zip([0] + split_at, split_at + [None])]\n data = [split_row(row) for row in txt[curlinenum + 2:]]\n df = DataFrame(data, columns=headers)\n df.index = df['id id'].apply(lambda x: x.strip())\n df.index.name = 'NOMAD id'\n columns_to_drop = ['id id']\n df['RAJ2000'] = (df['RA hh mm ss'].apply(lambda x: float(x.split()[0])) +\n df['RA hh mm ss'].apply(lambda x: float(x.split()[1])) / 60. +\n df['RA hh mm ss'].apply(lambda x: float(x.split()[2])) / 3600.) * 15.\n columns_to_drop.append('RA hh mm ss')\n dec_sign = lambda x: -1.0 if x.strip()[0] == '-' else 1.0\n df['DEJ2000'] = (df['DEC dd mm ss'].apply(dec_sign) *\n (df['DEC dd mm ss'].apply(lambda x: float(x.split()[0].replace('+', '').replace('-', ''))) +\n df['DEC dd mm ss'].apply(lambda x: float(x.split()[1])) / 60. +\n df['DEC dd mm ss'].apply(lambda x: float(x.split()[2])) / 3600.))\n columns_to_drop.append('DEC dd mm ss')\n columns_to_drop.append('ExtractID id')\n df = df.drop(columns_to_drop, axis=1)\n for column in df: # check first in each column for ability to convert to integer, then float, then leave alone\n if df[column].dtype == object and column != 'Flags hex':\n if df[column][0].strip().lstrip('-').lstrip('+').isdigit():\n df[column] = df[column].apply(np.int)\n elif df[column][0].strip().lstrip('-').lstrip('+').replace('.', '0').isdigit():\n df[column] = df[column].apply(np.float)\n return search_params, df",
"def spectra_analysis(file_name, sky_file_name): \n\n # read file name and select out the id that we are dealing with\n curr_file_name = file_name.split('.')\n curr_file_name = curr_file_name[0].split('/')\n stk_f_n = curr_file_name[-1]\n cube_id = int(re.search(r'\\d+', stk_f_n).group())\n\n # read catalogue and obtain the HST redshift estimate\n #catalogue = np.load(\"data/matched_catalogue.npy\")\n catalogue = np.load(\"data/low_redshift_catalogue.npy\")\n cat_loc = np.where(catalogue[:,0] == cube_id)[0]\n cube_info = catalogue[cat_loc][0]\n \n hst_redshift = cube_info[7]\n\n # spectra and sky noise data\n spectra_data = spectrum_creator(file_name)\n wl_soln = wavelength_solution(file_name)\n sn_data = sky_noise(sky_file_name)\n\n galaxy_data = spectra_data['galaxy']\n\n # removing baseline from data\n base = peakutils.baseline(galaxy_data, 3)\n gd_mc = galaxy_data - base\n\n # scaling sky-noise to be similar to spectra data\n gd_max = np.amax(galaxy_data)\n sn_data_max = np.amax(sn_data)\n sn_scale = gd_max / sn_data_max\n\n sn_data = sn_data * sn_scale\n\n # spectra lines\n sl = {\n 'emis': {\n '[OII]': '3727',\n 'CaK': '3933',\n 'CaH': '3968',\n 'Hdelta': '4101', \n }, \n 'abs': {'K': '3934.777',\n }\n } \n\n # we can use the redshift from the HST catalogue to define the region to search for\n # the doublet in\n\n # lower and upper bound on wavelength range\n lower_lambda = (1+hst_redshift)*3600\n upper_lambda = (1+hst_redshift)*3850\n\n # x-axis data\n data_h_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n mask = (lower_lambda < data_h_range) & (data_h_range < upper_lambda) \n\n lambda_data = data_h_range[mask]\n flux_data = gd_mc[mask] \n \n # Finding peaks with PeakUtils\n pu_peaks = peakutils.indexes(flux_data, thres=600, thres_abs=True)\n pu_peaks_x = peakutils.interpolate(lambda_data, flux_data, pu_peaks)\n\n pu_peaks_x = np.sort(pu_peaks_x)\n pu_peaks_x = pu_peaks_x[lower_lambda < pu_peaks_x]\n pu_peaks_x = pu_peaks_x[pu_peaks_x < upper_lambda]\n \n data_dir = 'cube_results/' + stk_f_n\n if not os.path.exists(data_dir):\n os.mkdir(data_dir)\n\n peaks_file = open(data_dir + '/' + stk_f_n + '_peaks.txt', 'w')\n peaks_file.write(\"Peaks found on \" + str(datetime.datetime.now()) + \"\\n\\n\")\n\n peaks_file.write(\"Number Wavelength \\n\")\n for i_peak in range(len(pu_peaks_x)):\n curr_peak = pu_peaks_x[i_peak]\n peaks_file.write(str(i_peak) + \" \" + str(curr_peak) + \"\\n\")\n\n # manually selecting which peak is the [OII] peak - given in wavelength\n if (pu_peaks_x.size != 0):\n otwo_wav = float(pu_peaks_x[0]) \n otwo_acc = float(sl['emis']['[OII]'])\n\n redshift = (otwo_wav / otwo_acc) - 1\n else:\n # accepting HST redshift if cannot find peak\n redshift = hst_redshift\n\n return {'gd_shifted': gd_mc, 'sky_noise': sn_data, 'spectra': sl, 'redshift': \n redshift, 'pu_peaks': pu_peaks_x}",
"def F_read_S5P_nc(self,fn,data_fields,data_fields_l2g=[]):\n from netCDF4 import Dataset\n ncid = Dataset(fn,'r')\n outp = {}\n for i in range(len(data_fields)):\n tmp = ncid[data_fields[i]]\n tmpdtype = tmp.dtype\n if not data_fields_l2g:\n varname = tmp.name\n else:\n varname = data_fields_l2g[i]\n if tmpdtype is \"str\":\n outp[varname] = tmp[:]\n else:\n outp[varname] = np.squeeze(tmp[:])\n ## scale factor already applied?! so confusing\n# try:\n# outp[varname] = outp[varname]*tmp.scale_factor\n# if tmp.scale_factor != 1:\n# print(varname+' has a scale_factor of '+'%s'%tmp.scale_factor)\n# except Exception:\n# #print(e)\n# print(varname+' has no scale_factor!')\n if 'time_utc' in outp.keys():\n UTC_matlab_datenum = np.zeros((len(outp['time_utc']),1),dtype=np.float64)\n for i in range(len(outp['time_utc'])):\n tmp = datetime.datetime.strptime(outp['time_utc'][i],'%Y-%m-%dT%H:%M:%S.%fZ')\n UTC_matlab_datenum[i] = (tmp.toordinal()\\\n +tmp.hour/24.\\\n +tmp.minute/1440.\\\n +tmp.second/86400.\\\n +tmp.microsecond/86400/1000000+366.)\n outp['UTC_matlab_datenum'] = np.tile(UTC_matlab_datenum,(1,outp['latc'].shape[1]))\n else: # hcho l2 does not have time_utc\n # the delta_time field of hcho fills all across track position, but ch4 is one per scanline\n if len(outp['delta_time'].shape) == 1:\n outp['delta_time'] = np.tile(outp['delta_time'][...,None],(1,outp['latc'].shape[1]))\n outp['UTC_matlab_datenum'] = (outp['time']+outp['delta_time']/1000.)/86400.+734139.\n \n outp['across_track_position'] = np.tile(np.arange(1.,outp['latc'].shape[1]+1),\\\n (outp['latc'].shape[0],1)).astype(np.int16)\n return outp",
"def test_cl_fix_file(mock_get_filepath, cl_file, tmp_path):\n mock_get_filepath.return_value = os.path.join(tmp_path,\n 'fixed_cesm2_waccm_cl.nc')\n fix = Cl(None)\n fixed_file = fix.fix_file(cl_file, tmp_path)\n mock_get_filepath.assert_called_once_with(tmp_path, cl_file)\n fixed_dataset = Dataset(fixed_file, mode='r')\n assert fixed_dataset.variables['lev'].standard_name == (\n 'atmosphere_hybrid_sigma_pressure_coordinate')\n assert fixed_dataset.variables['lev'].formula_terms == (\n 'p0: p0 a: a b: b ps: ps')\n assert fixed_dataset.variables['lev'].units == '1'\n np.testing.assert_allclose(fixed_dataset.variables['a'][:], [1.0, 2.0])\n np.testing.assert_allclose(fixed_dataset.variables['b'][:], [0.0, 1.0])\n np.testing.assert_allclose(fixed_dataset.variables['a_bnds'][:],\n [[0.0, 1.5], [1.5, 3.0]])\n np.testing.assert_allclose(fixed_dataset.variables['b_bnds'][:],\n [[-1.0, 0.5], [0.5, 2.0]])",
"def test_compute_Sv_ek80_CW_complex_BB_complex(ek80_cal_path):\n ek80_raw_path = ek80_cal_path / \"2018115-D20181213-T094600.raw\"\n ed = ep.open_raw(ek80_raw_path, sonar_model=\"EK80\")\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"CW\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)\n ds_Sv = ep.calibrate.compute_Sv(\n ed, waveform_mode=\"BB\", encode_mode=\"complex\"\n )\n assert isinstance(ds_Sv, xr.Dataset)",
"def test_CFCalculation_txt_files():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n #Make sure new script produces the same result as old one\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=-419.7891726292168,\n spin_down=-414.7152560307904,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=4,\n m=0,\n spin_up=-35.92607948104669,\n spin_down=-26.384951772020756,\n unit='K',\n convention='Stevens'),\n CFCoefficient(l=6, m=0, spin_up=6.522900740505054, spin_down=5.488104692050172, unit='K', convention='Stevens')\n ]\n\n cf = CFCalculation(reference_radius='cdn')\n cf.readPot('files/cf_calculation/VKS.2.0.dat',\n 'files/cf_calculation/VKS.4.0.dat',\n 'files/cf_calculation/VKS.6.0.dat',\n lm=[(2, 0), (4, 0), (6, 0)])\n cf.readCDN('files/cf_calculation/Nd.dat', header=3)\n cf.cdn['RMT'] = 3.138049652\n results = cf.performIntegration()\n\n assert results == expected_results",
"def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = ~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + 
ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, 
coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. [deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')",
"def Test_data():\n print (\"loading test data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n\n with h5py.File(join(data_root, './data/test_real2.h5')) as f:\n test_real = f['test_real'][:]\n with h5py.File(join(data_root, './data/test_imag2.h5')) as f:\n test_imag = f['test_imag'][:]\n test_real = np.transpose(test_real, (0, 1, 3, 2))\n test_imag = np.transpose(test_imag, (0, 1, 3, 2))\n test_data = test_real+1j*test_imag\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end - time_start))\n return test_data",
"def test_CFCalculation_hdf_files_wybourne_convention():\n from masci_tools.tools.cf_calculation import CFCalculation, CFCoefficient\n\n expected_results = [\n CFCoefficient(l=2,\n m=0,\n spin_up=(-1143.37690772798 + 0j),\n spin_down=(-1116.4673949314702 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=4,\n m=0,\n spin_up=(-279.86031845844036 + 0j),\n spin_down=(-174.80348694839867 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=0,\n spin_up=(61.60559164788924 + 0j),\n spin_down=(34.69144207186498 + 0j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=-6,\n spin_up=(116.32750335918315 + 4.696327749935313e-06j),\n spin_down=(90.09789430612014 + 3.6373963939901583e-06j),\n unit='K',\n convention='Wybourne'),\n CFCoefficient(l=6,\n m=6,\n spin_up=(116.32750335918315 - 4.696327749935313e-06j),\n spin_down=(90.09789430612014 - 3.6373963939901583e-06j),\n unit='K',\n convention='Wybourne')\n ]\n\n cf = CFCalculation()\n cf.readPot('files/cf_calculation/CFdata.hdf')\n cf.readCDN('files/cf_calculation/CFdata.hdf')\n results = cf.performIntegration(convert=False)\n\n print(results)\n assert results == expected_results",
"def test_species_to_sdf_file(self):\n path = os.path.join(ARC_PATH, 'arc', 'testing', 'mol.sdf')\n spc = ARCSpecies(label='NCC', smiles='NCC')\n converter.species_to_sdf_file(spc, path)\n with open(path, 'r') as f:\n sdf_content = f.read()\n expected_sdf = \"\"\"\n RDKit 3D\n\n 10 9 0 0 0 0 0 0 0 0999 V2000\n 1.1517 -0.3760 -0.5231 N 0 0 0 0 0 0 0 0 0 0 0 0\n 0.2893 0.4500 0.3115 C 0 0 0 0 0 0 0 0 0 0 0 0\n -1.1415 -0.0561 0.2592 C 0 0 0 0 0 0 0 0 0 0 0 0\n 1.1386 -1.3376 -0.1854 H 0 0 0 0 0 0 0 0 0 0 0 0\n 2.1151 -0.0555 -0.4352 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.6517 0.4342 1.3447 H 0 0 0 0 0 0 0 0 0 0 0 0\n 0.3279 1.4855 -0.0414 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.2133 -1.0839 0.6308 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.7870 0.5726 0.8809 H 0 0 0 0 0 0 0 0 0 0 0 0\n -1.5327 -0.0332 -0.7636 H 0 0 0 0 0 0 0 0 0 0 0 0\n 1 2 1 0\n 1 4 1 0\n 1 5 1 0\n 2 3 1 0\n 2 6 1 0\n 2 7 1 0\n 3 8 1 0\n 3 9 1 0\n 3 10 1 0\nM END\n$$$$\n\"\"\"\n self.assertEqual(sdf_content, expected_sdf)",
"def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return",
"def convert_calculations(filename, hdf5_data):\n x1 = []\n\n with open(filename, 'r') as inp:\n for line in inp:\n x1.append(line)\n\n idx = 1\n dset = require_dataset(hdf5_data, structure.H5_ENV_VOLUME, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_VOLUME_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_GRAVITY, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_GRAVITY_ATTR)\n idx += 1\n\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_DEPTH, (1,), dtype=settings.NEMOH_FLOAT)\n dset[0] = float(x1[idx].split()[0])\n set_hdf5_attributes(dset, structure.H5_ENV_DEPTH_ATTR)\n idx += 1\n\n dset = require_dataset(hdf5_data, structure.H5_ENV_WAVE_POINT, (2,), dtype=settings.NEMOH_FLOAT)\n x2 = x1[idx].split()\n dset[0] = float(x2[0])\n dset[1] = float(x2[1])\n set_hdf5_attributes(dset, structure.H5_ENV_WAVE_POINT_ATTR)\n\n idx = 6\n\n num_bodies = int(x1[idx].split()[0])\n\n for i in range(num_bodies):\n\n body = structure.H5_BODIES + structure.H5_BODY_BASE + str(i+1) + '/'\n idx += 2\n\n mesh_x = []\n\n mesh_path = os.path.join(os.path.abspath(os.path.dirname(filename)), str(x1[idx].split()[0]).strip(' \\t\\n\\r'))\n\n with open(mesh_path, 'r') as mesh_file:\n for line in mesh_file:\n mesh_x.append(line)\n\n idx += 1\n x2 = x1[idx].split()\n\n num_points = int(x2[0])\n num_panels = int(x2[1])\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_POINTS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_points\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_POINTS_ATTR)\n\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_NUM_PANELS, (1, ), dtype=settings.NEMOH_INT)\n dset[0] = num_panels\n set_hdf5_attributes(dset, structure.H5_BODY_NUM_PANELS_ATTR)\n\n mesh_idx = 0\n dset = require_dataset(hdf5_data, body + structure.H5_BODY_MESH, (num_points+num_panels+1, 4), dtype=settings.NEMOH_FLOAT)\n mesh_x2 = mesh_x[mesh_idx].split()\n set_hdf5_attributes(dset, structure.H5_BODY_MESH_ATTR)\n\n dset[0, 0] = int(mesh_x2[0])\n dset[0, 1] = int(mesh_x2[1])\n\n for j in range(1, num_points+num_panels+1):\n mesh_idx += 1\n mesh_x2 = mesh_x[mesh_idx].split()\n dset[j, :] = [float(x) for x in mesh_x2[:4]]\n\n if j == num_points:\n mesh_idx += 1\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_FREEDOM_DEGREE, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREEDOM_DEGREE_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = np.array([float(x) for x in x2[:7]])\n\n idx += 1\n num = int(x1[idx].split()[0])\n dset = require_dataset(hdf5_data, body + structure.H5_GENERALISED_FORCES, (num, 7), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_GENERALISED_FORCES_ATTR)\n for j in range(num):\n idx += 1\n x2 = x1[idx].split()\n dset[j, :] = [float(x) for x in x2[:7]]\n\n idx += 1\n num = int(x1[idx].split()[0])\n for j in range(num):\n idx += 1\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_FREQUENCIES_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, 
structure.H5_MAX_WAVE_FREQUENCIES, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_FREQUENCIES_ATTR)\n dset[0] = float(x2[2])\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_NUM_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_NUM_WAVE_DIRECTIONS_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_MIN_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MIN_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[1])\n\n dset = require_dataset(hdf5_data, structure.H5_MAX_WAVE_DIRECTIONS, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_MAX_WAVE_DIRECTIONS_ATTR)\n dset[0] = float(x2[2])\n\n idx += 2\n x2 = x1[idx].split()\n\n dset = require_dataset(hdf5_data, structure.H5_COMPUTE_IRF, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_COMPUTE_IRF_ATTR)\n dset[0] = int(x2[0])\n\n dset = require_dataset(hdf5_data, structure.H5_IRF_TIME_STEP, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_TIME_STEP_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_IRF_DURATION, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_IRF_DURATION_ATTR)\n dset[0] = float(x2[2])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_SHOW_PRESSURE, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_SHOW_PRESSURE_ATTR)\n dset[0] = int(x2[0])\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_NUMBER, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_NUMBER_ATTR)\n dset[0] = float(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MIN, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MIN_ATTR)\n dset[0] = float(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_KOCHIN_MAX, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_KOCHIN_MAX_ATTR)\n dset[0] = float(x2[2])\n\n\n idx += 1\n x2 = x1[idx].split()\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_X, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_X_ATTR)\n dset[0] = int(x2[0])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_POINTS_Y, (1,), dtype=settings.NEMOH_INT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_POINTS_Y_ATTR)\n dset[0] = int(x2[1])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_X, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_X_ATTR)\n dset[0] = float(x2[2])\n dset = require_dataset(hdf5_data, structure.H5_FREE_SURFACE_DIMENSION_Y, (1,), dtype=settings.NEMOH_FLOAT)\n set_hdf5_attributes(dset, structure.H5_FREE_SURFACE_DIMENSION_Y_ATTR)\n dset[0] = float(x2[3])",
"def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']",
"def test_brainvision_data():\n assert_raises(IOError, read_raw_brainvision, vmrk_path)\n assert_raises(ValueError, read_raw_brainvision, vhdr_path, montage,\n preload=True, scale=\"foo\")\n with warnings.catch_warnings(record=True) as w: # event parsing\n raw_py = _test_raw_reader(\n read_raw_brainvision, vhdr_fname=vhdr_path, montage=montage,\n eog=eog)\n assert_true(all('parse triggers that' in str(ww.message) for ww in w))\n assert_true('RawBrainVision' in repr(raw_py))\n\n assert_equal(raw_py.info['highpass'], 0.)\n assert_equal(raw_py.info['lowpass'], 250.)\n\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_py, times_py = raw_py[picks]\n\n # compare with a file that was generated using MNE-C\n raw_bin = Raw(eeg_bin, preload=True)\n picks = pick_types(raw_py.info, meg=False, eeg=True, exclude='bads')\n data_bin, times_bin = raw_bin[picks]\n\n assert_array_almost_equal(data_py, data_bin)\n assert_array_almost_equal(times_py, times_bin)\n\n # Make sure EOG channels are marked correctly\n for ch in raw_py.info['chs']:\n if ch['ch_name'] in eog:\n assert_equal(ch['kind'], FIFF.FIFFV_EOG_CH)\n elif ch['ch_name'] == 'STI 014':\n assert_equal(ch['kind'], FIFF.FIFFV_STIM_CH)\n elif ch['ch_name'] in raw_py.info['ch_names']:\n assert_equal(ch['kind'], FIFF.FIFFV_EEG_CH)\n else:\n raise RuntimeError(\"Unknown Channel: %s\" % ch['ch_name'])\n\n # test loading v2\n read_raw_brainvision(vhdr_v2_path, eog=eog, preload=True,\n response_trig_shift=1000)",
"def test_compute_Sv_ek80_BB_complex(ek80_path):\n ek80_raw_path = str(\n ek80_path.joinpath('ar2.0-D20201209-T235955.raw')\n ) # CW complex\n echodata = ep.open_raw(ek80_raw_path, sonar_model='EK80')\n ds_Sv = ep.calibrate.compute_Sv(\n echodata, waveform_mode='BB', encode_mode='complex'\n )\n assert isinstance(ds_Sv, xr.Dataset) is True\n ds_TS = ep.calibrate.compute_TS(\n echodata, waveform_mode='BB', encode_mode='complex'\n )\n assert isinstance(ds_TS, xr.Dataset) is True",
"def test_defect_calculation_control():\n csnet = example.control.cs_network()\n slaves, connections = csnet\n step_sizes = {name: Fraction(1, 2) for name in slaves}\n make_zoh: cs.ConverterConstructor = cs.Zoh\n rate_converters = {cs.Connection(src, dst): make_zoh for dst, src in connections.items()}\n initial_tokens = {sdf.Dst('PI', 'u'): [0.], sdf.Dst('PT2', 'u'): [0.]}\n cosim = csnet, step_sizes, rate_converters, initial_tokens\n defect = cs.evaluate(cosim, Fraction(20.))\n for val in defect.connection.values():\n assert val < float('inf')\n for val in defect.output.values():\n assert val < float('inf')",
"def read_h5(self):\n infile = h5py.File(self.inf_name,'r')\n\n vardict = self.labdict\n #store data with the correct labels\n for k in infile['plasma/1d'].keys():\n try:\n vardict[k] = infile[self.labdict[k]].value\n except:\n vardict[k] = []\n\n vardict['a_ions']=infile['/plasma/anum'].value\n vardict['znum']=infile['/plasma/znum'].value\n \n\n self.rho_in = vardict['rho']\n self._rho_vol = infile['distributions/rhoDist/abscissae/dim1'].value[1:]\n self._volumes = infile['distributions/rhoDist/shellVolume'].value\n self.nrho_in = np.size(self.rho_in)\n\n if vardict['a_ions'][0]!='/':\n self.nspec = len(vardict['a_ions'])\n else:\n self.nspec = vardict['ni'].shape[1]\n print(\"Number of ions: \", self.nspec)\n if len(vardict['a_ions'])!=len(vardict['znum']):\n print(\"ERROR! array of A and Z don't have the same length\")\n\n self.A = vardict['a_ions']\n self.Z = vardict['znum']\n self.nion = self.nspec\n \n self.te_in = vardict['te'][:]\n self.ne_in = vardict['ne'][:]\n self.ti_in = vardict['ti'][:]\n ni1_in = vardict['ni'][:,0]\n self.ni_in = np.zeros((self.nion, self.nrho_in),dtype=float)\n self.ni_in[0,:] = ni1_in\n if self.nion==2:\n ni2_in = vardict['ni'][:,1]\n self.ni_in[1,:] = ni2_in\n elif self.nion==3:\n ni2_in = vardict['ni'][:,1]\n ni3_in = vardict['ni'][:,2]\n self.ni_in[1,:] = ni2_in\n self.ni_in[2,:] = ni3_in\n\n try:\n self.vt_in = vardict['vtor']\n except:\n self.vt_in = np.zeros(self.nrho_in,dtype=float)\n\n try:\n self.zeff_in = vardict['zeff'][:]\n except:\n self.zeff_in = np.zeros(self.nrho_in,dtype=float)\n\n self.ni = np.zeros((self.nion, self.nrho),dtype = float)\n self.spline()",
"def read_satellite(filename, ftype):\n #ftype = 'l3c'\n #filename = '/gws/nopw/j04/cds_c3s_sst/output/v2.6.0/l3c/AVHRR19_G/2018/03/01/20180301120000-C3S-L3C_GHRSST-SSTskin-AVHRR19_G-ICDR2.0_day-v02.0-fv01.0.nc'\n #ftype = 'l4'\n #filename = '/gws/nopw/j04/cds_c3s_sst/public/data/ICDR_v2/Analysis/L4/v2.0/2018/01/01/20180101120000-C3S-L4_GHRSST-SSTdepth-OSTIA-GLOB_ICDR2.0-v02.0-fv01.0.nc'\n print \"Reading %s file: %s\" % (ftype, filename)\n \n # Read data - L4 or L3C (note L4 mask and L3C quality level have same array name)\n ncin = netCDF4.Dataset(filename)\n if ftype == 'l4':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n sst = ncin.variables['analysed_sst'][:]\n unc = ncin.variables['analysis_uncertainty'][:]\n sea_ice_frac = ncin.variables['sea_ice_fraction'][:]\n ql = ncin.variables['mask'][:]\n sstfill = ncin.variables['analysed_sst']._FillValue\n sstao = ncin.variables['analysed_sst'].add_offset\n sstsf = ncin.variables['analysed_sst'].scale_factor\n elif ftype == 'l3c':\n lon = ncin.variables['lon'][:]\n lat = ncin.variables['lat'][:]\n time_read = ncin.variables['time'][:]\n time_bnds = ncin.variables['time_bnds'][:]\n sst = ncin.variables['sea_surface_temperature'][:]\n sst_depth = ncin.variables['sea_surface_temperature_depth'][:]\n sst_dtime = ncin.variables['sst_dtime'][:]\n sst_depth_dtime = ncin.variables['sst_depth_dtime'][:]\n sses_bias = ncin.variables['sses_bias'][:]\n sses_sd = ncin.variables['sses_standard_deviation'][:]\n sst_depth_total_unc = ncin.variables['sst_depth_total_uncertainty'][:]\n l2p_flags = ncin.variables['l2p_flags'][:]\n ql = ncin.variables['quality_level'][:]\n wind_speed = ncin.variables['wind_speed'][:]\n large_scale_cor_unc = ncin.variables['large_scale_correlated_uncertainty'][:]\n synop_cor_unc = ncin.variables['synoptically_correlated_uncertainty'][:]\n uncor_unc = ncin.variables['uncorrelated_uncertainty'][:]\n adj_unc = ncin.variables['adjustment_uncertainty'][:]\n aerosol_dyn_ind = ncin.variables['aerosol_dynamic_indicator'][:]\n sens = ncin.variables['sensitivity'][:]\n tfill = ncin.variables['sst_dtime']._FillValue\n sstfill = ncin.variables['sea_surface_temperature']._FillValue\n sstao = ncin.variables['sea_surface_temperature'].add_offset\n sstsf = ncin.variables['sea_surface_temperature'].scale_factor\n else:\n print 'ftype not recognised or supported'\n \n # Create time field\n # -> If L4 then create a time field set to time in L4 file\n # -> Also add a time fill value to keep coding simple later on\n if ftype == 'l4':\n time = np.empty((7200,3600))\n time[:,:] = time_read\n tfill = -2147483648\n else:\n time = copy.deepcopy(sst_dtime) # Need to make a hard copy\n mask = sst_dtime.mask == False; mask = mask[0,:,:]\n row, col = np.where(mask==True)\n time.data[0, row, col] = time.data[0,row, col] + time_read\n \n # Create output structure\n if ftype == 'l4':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n sst=sst,\n unc=unc,\n sea_ice_frac=sea_ice_frac,\n ql=ql,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n elif ftype == 'l3c':\n data = dict(lon=lon,\n lat=lat,\n time_read=time_read,\n time=time,\n time_bnds=time_bnds,\n sst=sst,\n sst_depth=sst_depth,\n sst_dtime=sst_dtime,\n sst_depth_dtime=sst_depth_dtime,\n sses_bias=sses_bias,\n sses_sd=sses_sd,\n sst_depth_total_unc=sst_depth_total_unc,\n l2p_flags=l2p_flags,\n ql=ql,\n wind_speed=wind_speed,\n large_scale_cor_unc=large_scale_cor_unc,\n synop_cor_unc=synop_cor_unc,\n 
uncor_unc=uncor_unc,\n adj_unc=adj_unc,\n aerosol_dyn_ind=aerosol_dyn_ind,\n sens=sens,\n tfill=tfill,\n sstfill=sstfill,\n sstao=sstao,\n sstsf=sstsf)\n else:\n print 'ftype not recognised or supported'\n \n return data",
"def read_skyh5(\n self, filename, run_check=True, check_extra=True, run_check_acceptability=True\n ):\n with h5py.File(filename, \"r\") as fileobj:\n if \"/Header\" not in fileobj:\n raise ValueError(\"This is not a proper skyh5 file.\")\n\n init_params = {\"filename\": os.path.basename(filename)}\n\n with h5py.File(filename, \"r\") as fileobj:\n # extract header information\n header = fileobj[\"/Header\"]\n header_params = [\n \"_Ncomponents\",\n \"_Nfreqs\",\n \"_component_type\",\n \"_spectral_type\",\n \"_history\",\n \"_name\",\n \"_nside\",\n \"_hpx_order\",\n \"_hpx_inds\",\n \"_freq_array\",\n \"_freq_edge_array\",\n \"_reference_frequency\",\n \"_spectral_index\",\n \"_stokes_error\",\n \"_beam_amp\",\n \"_extended_model_group\",\n ]\n\n optional_params = [\n \"_hpx_order\",\n \"_freq_array\",\n \"_freq_edge_array\",\n \"_reference_frequency\",\n \"_spectral_index\",\n \"_stokes_error\",\n \"_beam_amp\",\n \"_extended_model_group\",\n ]\n\n self.component_type = header[\"component_type\"][()].tobytes().decode(\"utf-8\")\n\n if self.component_type != \"healpix\":\n optional_params.extend([\"_nside\", \"_hpx_inds\"])\n if \"skycoord\" in header:\n skycoord_dict = {}\n for key in header[\"skycoord\"]:\n if key in [\"frame\", \"representation_type\"]:\n expected_type = str\n else:\n expected_type = None\n skycoord_dict[key] = _get_value_hdf5_group(\n header[\"skycoord\"], key, expected_type\n )\n init_params[\"skycoord\"] = SkyCoord(**skycoord_dict)\n else:\n if \"lat\" in header and \"lon\" in header and \"frame\" in header:\n header_params += [\"lat\", \"lon\", \"frame\"]\n optional_params += [\"lat\", \"lon\", \"frame\"]\n elif \"ra\" in header and \"dec\" in header:\n header_params += [\"ra\", \"dec\"]\n optional_params += [\"ra\", \"dec\"]\n else:\n raise ValueError(\n \"No component location information found in file.\"\n )\n warnings.warn(\n \"Parameter skycoord not found in skyh5 file. \"\n \"This skyh5 file was written by an older version of pyradiosky. 
\"\n \"Consider re-writing this file to ensure future compatibility\"\n )\n else:\n optional_params.append(\"_name\")\n\n if \"hpx_frame\" in header:\n if isinstance(header[\"hpx_frame\"], h5py.Dataset):\n # hpx_frame was stored as a string\n frame_str = _get_value_hdf5_group(header, \"hpx_frame\", str)\n dummy_coord = SkyCoord(0, 0, unit=\"rad\", frame=frame_str)\n init_params[\n \"hpx_frame\"\n ] = dummy_coord.frame.replicate_without_data(copy=True)\n else:\n # hpx_frame was stored as a nested dset\n skycoord_dict = {}\n for key in header[\"hpx_frame\"]:\n if key in [\"frame\", \"representation_type\"]:\n expected_type = str\n else:\n expected_type = None\n skycoord_dict[key] = _get_value_hdf5_group(\n header[\"hpx_frame\"], key, expected_type\n )\n dummy_coord = SkyCoord(0, 0, unit=\"rad\", **skycoord_dict)\n init_params[\n \"hpx_frame\"\n ] = dummy_coord.frame.replicate_without_data(copy=True)\n elif \"frame\" in header:\n # frame was stored as a string\n frame_str = _get_value_hdf5_group(header, \"frame\", str)\n dummy_coord = SkyCoord(0, 0, unit=\"rad\", frame=frame_str)\n init_params[\"hpx_frame\"] = dummy_coord.frame.replicate_without_data(\n copy=True\n )\n\n for par in header_params:\n if par in [\"lat\", \"lon\", \"frame\", \"ra\", \"dec\"]:\n parname = par\n if par == \"frame\":\n expected_type = \"str\"\n else:\n expected_type = Quantity\n else:\n param = getattr(self, par)\n parname = param.name\n expected_type = param.expected_type\n\n # skip optional params if not present\n if par in optional_params:\n if parname not in header:\n continue\n\n if parname not in header:\n raise ValueError(\n f\"Expected parameter {parname} is missing in file.\"\n )\n\n value = _get_value_hdf5_group(header, parname, expected_type)\n\n if parname == \"nside\":\n value = int(value)\n\n init_params[parname] = value\n\n # check that the parameters not passed to the init make sense\n if init_params[\"component_type\"] == \"healpix\":\n if init_params[\"Ncomponents\"] != init_params[\"hpx_inds\"].size:\n raise ValueError(\n \"Ncomponents is not equal to the size of 'hpx_inds'.\"\n )\n else:\n if init_params[\"Ncomponents\"] != init_params[\"name\"].size:\n raise ValueError(\"Ncomponents is not equal to the size of 'name'.\")\n\n if \"freq_array\" in init_params.keys():\n if init_params[\"Nfreqs\"] != init_params[\"freq_array\"].size:\n raise ValueError(\"Nfreqs is not equal to the size of 'freq_array'.\")\n\n if init_params[\"spectral_type\"] == \"subband\":\n if \"freq_edge_array\" not in init_params.keys():\n try:\n init_params[\n \"freq_edge_array\"\n ] = _get_freq_edges_from_centers(\n init_params[\"freq_array\"], self._freq_array.tols\n )\n except ValueError:\n warnings.warn(\n \"No freq_edge_array in this file and frequencies are \"\n \"not evenly spaced, so spectral_type will be set to \"\n \"'full' rather than 'subband'.\"\n )\n init_params[\"spectral_type\"] = \"full\"\n\n # remove parameters not needed in __init__\n init_params.pop(\"Ncomponents\")\n init_params.pop(\"Nfreqs\")\n\n # get stokes array\n dgrp = fileobj[\"/Data\"]\n init_params[\"stokes\"] = dgrp[\"stokes\"] * units.Unit(\n dgrp[\"stokes\"].attrs[\"unit\"]\n )\n # frame is a new parameter, check if it exists and try to read\n # otherwise default to ICRS (the old assumed frame.)\n if \"skycoord\" not in init_params and self.component_type != \"healpix\":\n if \"frame\" in header:\n init_params[\"frame\"] = header[\"frame\"][()].tobytes().decode(\"utf8\")\n else:\n warnings.warn(\n \"No frame available in this file, assuming 
'icrs'. \"\n \"Consider re-writing this file to ensure future compatility.\"\n )\n init_params[\"frame\"] = \"icrs\"\n\n if self.component_type == \"healpix\" and \"hpx_frame\" in init_params.keys():\n init_params[\"frame\"] = init_params[\"hpx_frame\"]\n del init_params[\"hpx_frame\"]\n\n if self.component_type == \"healpix\" and \"frame\" not in init_params:\n warnings.warn(\n \"No frame available in this file, assuming 'icrs'. \"\n \"Consider re-writing this file to ensure future compatility.\"\n )\n init_params[\"frame\"] = \"icrs\"\n\n self.__init__(**init_params)\n\n if run_check:\n self.check(\n check_extra=check_extra, run_check_acceptability=run_check_acceptability\n )",
"def run_main_test():\r\n\r\n print(\"\"\"\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n +++ Performing Main LZJD Full File Test +++\r\n +++++++++++++++++++++++++++++++++++++++++++\r\n \"\"\")\r\n # iterate over the files in the directory\r\n for f in listdir(SRC):\r\n if isfile(join(SRC, f)):\r\n # prepare a dictionary with the digests ready to compare\r\n DIGESTS[f] = {'src': None, 'r2': None, 'ghidra': None}\r\n\r\n # calculate digest of src file\r\n DIGESTS[f]['src'] = digest(join(SRC, f))\r\n\r\n # name adjustment\r\n f2 = f.replace(\".c\", \".o\")\r\n\r\n # calculate digest of ghidra and r2 outputs\r\n DIGESTS[f]['ghidra'] = digest(join(GHIDRA_PATH, GHIDRA_NAME.format(f2)))\r\n DIGESTS[f]['r2'] = digest(join(R2DEC_PATH, R2DEC_NAME.format(f2)))\r\n\r\n # obtain the similarity from source\r\n SCORES[f] = {'ghidra': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['ghidra']),\r\n 'r2': get_lzjd_sim(DIGESTS[f]['src'], DIGESTS[f]['r2']),\r\n 'x': get_lzjd_sim(DIGESTS[f]['ghidra'], DIGESTS[f]['r2'])}\r\n\r\n gidra_doms = 0\r\n for f in SCORES:\r\n print(\"{0:12}: Scores G:{1:20} R2:{2:20} X:{3:20} D:{4:20}\".format(f,\r\n SCORES[f]['ghidra'],\r\n SCORES[f]['r2'],\r\n SCORES[f]['x'],\r\n SCORES[f]['ghidra'] - SCORES[f]['r2']))\r\n if SCORES[f]['ghidra'] > SCORES[f]['r2']:\r\n gidra_doms += 1\r\n print(\"Ghidra Dominated on {} files\".format(gidra_doms))\r\n # This section of code prepares visualizations on the data for easy analysis\r\n plot_scatter(SCORES, title=\"LZJD Full File scores\")\r\n\r\n # obtian the scores as input data to the plots\r\n bxplt_data_gd = [score['ghidra'] for score in SCORES.values()]\r\n bxplt_data_r2 = [score['r2'] for score in SCORES.values()]\r\n\r\n # run pairwise t test\r\n print(\"Performing T-Test on LZJD Distance of files\")\r\n run_ttest(bxplt_data_gd, bxplt_data_r2)",
"def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return",
"def compare_river_directions_with_dynriver_corrs_and_MERIThydro_derived_corrs(self):\n ref_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs\"\n \"/lake_analysis_one_21_Jun_2021\")\n data_base_dir = (\"/Users/thomasriddick/Documents/data/lake_analysis_runs/\"\n \"lake_analysis_two_26_Mar_2022\")\n ref_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_flowtocell.nc\")\n data_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_flowtocell.nc\")\n lsmask_filename=os.path.join(self.plots_data_dir,\n \"ls_mask_extract_ls_mask_from_corrected_HD_rdirs_20160504_142435.nc\")\n ref_catchment_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_catchments.nc\")\n data_catchment_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_catchments.nc\")\n ref_rdirs_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_rdirs.nc\")\n reference_rmouth_outflows_filename=os.path.join(ref_base_dir,\n \"rivers/results/diag_version_29_date_0\",\n \"30min_flowtorivermouths.nc\")\n data_rmouth_outflows_filename=os.path.join(data_base_dir,\n \"rivers/results/diag_version_0_date_0\",\n \"30min_flowtorivermouths.nc\")\n #glacier_mask_filename=os.path.join(self.orog_data_directory,\"ice5g_v1_2_21_0k_10min.nc\")\n self.FlowMapTwoColourComparisonWithCatchmentsHelper(ref_flowmap_filename=ref_filename,\n data_flowmap_filename=data_filename,\n ref_catchment_filename=\\\n ref_catchment_filename,\n data_catchment_filename=\\\n data_catchment_filename,\n ref_rdirs_filename=\\\n ref_rdirs_filename,\n data_rdirs_filename=None,\n reference_rmouth_outflows_filename=\\\n reference_rmouth_outflows_filename,\n data_rmouth_outflows_filename=\\\n data_rmouth_outflows_filename,\n lsmask_filename=lsmask_filename,\n minflowcutoff=80,\n flip_data=False,\n rotate_data=False,\n flip_ref=False,\n rotate_ref=False,\n lsmask_has_same_orientation_as_ref=False,\n flip_lsmask=False,rotate_lsmask=False,\n invert_ls_mask=False,\n first_datasource_name=\"old\",\n second_datasource_name=\"MERIT derived\",\n matching_parameter_set='extensive',\n catchment_and_outflows_mods_list_filename=\\\n None,\n #\"catch_and_outflow_mods_ice6g_vs_ice5g_lgm.txt\",\n #additional_matches_list_filename=\\\n #\"additional_matches_ice6g_vs_ice5g_lgm.txt\",\n use_single_color_for_discrepancies=True,\n use_only_one_color_for_flowmap=False,\n use_title=False,remove_antartica=True,\n difference_in_catchment_label=\"Difference\",\n grid_type='HD')"
] | [
"0.6232863",
"0.6190931",
"0.6065787",
"0.5965935",
"0.59624934",
"0.5858929",
"0.5856982",
"0.5837951",
"0.5831565",
"0.58128524",
"0.5789938",
"0.57884246",
"0.5777758",
"0.5774943",
"0.57337826",
"0.5721014",
"0.569534",
"0.5672289",
"0.56514597",
"0.5634904",
"0.5580489",
"0.55664647",
"0.55572945",
"0.5547208",
"0.55148506",
"0.55077463",
"0.5495898",
"0.5479068",
"0.5474958",
"0.5472551"
] | 0.7407615 | 0 |
keys_to_track order is important! Matches will be tested in this order. | def __init__(self, keys_to_track):
self.keys_to_track = keys_to_track
self.tracker = {}
for key_to_track in self.keys_to_track:
self.tracker[key_to_track] = {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"def test_matching_tracks(self):\n\n # 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n # 8755 : Satisfied (feat. Miguel & Queen Latifah) by Sia\n # 6699 : Un Besito Mas (feat. Juan Luis Guerra) by Jesse & Joy\n targets = {5037: '2fGFaTDbE8aS4f31fM0XE4',\n 8755: '1ybJ2itxCxPCPkcA9sOgTO',\n 6699: '1182pxG4uNxr3QqIH8b8k0',\n }\n\n matches = {track.i_id: track.id\n for track in self.tracks\n if track.i_id in targets}\n\n for i_id, s_id in targets.iteritems():\n self.assertEqual(s_id, matches[i_id])",
"def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')",
"def matches(self):\n pass",
"def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None",
"def test_keywords(self):\n\n test_cases = (\n makeTestCase('adele 21',\n AlbumResultMatcher(title=Equals('21'), artist=Equals('adele')),\n ArtistResultMatcher(title=Equals('adele'))),\n makeTestCase('kanye power',\n TrackResultMatcher(title=Equals('power', artist=Equals('kanye west'))),\n ArtistResultMatcher(title=Equals('kanye west')),\n AlbumResultMatcher(title=Equals('my beautiful dark twisted fantasy'))),\n makeTestCase('ratat party with children',\n TrackResultMatcher(title=Equals('party with children', artist=Equals('ratatat'))),\n ArtistResultMatcher(title=Equals('ratatat'))),\n makeTestCase('flobot fight with tools handlebars',\n TrackResultMatcher(title=Equals('handlebars')),\n ArtistResultMatcher(title=Equals('flobots')),\n AlbumResultMatcher(title=Equals('fight with tools')))\n )\n\n self._run_tests(tests, {})",
"def report_keyset(self):\n for i, matchset in enumerate(self.matches):\n if len(matchset) == 1:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), BLUE)\n elif len(matchset) != 0:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), WHITE)\n else:\n print \"[%02d]\" % i, fmt(\"[X]\", RED)",
"def add(self, obj, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if match_val is None or match_val == '':\r\n pass\r\n else:\r\n self.tracker[key_to_track][match_val] = obj",
"def test_ms_track_search(helpers):\n item_from_xml, item_from_dict = common_tests(\n MSTrack,\n MS_TRACK_SEARCH_XML,\n MS_TRACK_SEARCH_DICT,\n \"00020064tracksearch:pilgrim\",\n helpers,\n )\n getter_attributes_test(\n \"artist\", item_from_xml, item_from_dict, MS_TRACK_SEARCH_DICT.get(\"artist\")\n )\n getter_attributes_test(\n \"uri\", item_from_xml, item_from_dict, MS_TRACK_SEARCH_DICT[\"uri\"]\n )",
"def test_keys_eq(self):\n self.assertListEqual(self.result, self.expected)",
"def match_source_key(self, match):\n raise NotImplementedError",
"def testTrackDict3(self):\n\n goodTrackDict = {\n \"number\": \"12\", \"uid\": \"301356576\", \"codec_id\": \"S_TEXT/SSA\",\n \"codec_private_length\": \"783\", \"codec_private_data\": \"5b5363726\",\n \"language\": \"slv\", \"track_name\": \"Slovenian\", \"default_track\": \"0\",\n \"forced_track\": \"0\", \"enabled_track\": \"1\"\n }\n\n trackLine = _buildTrackLine(11, 'subtitles', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )",
"def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[BondKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n if param.k_bondorder or param.length_bondorder:\n bond = topology.get_bond_between(*key)\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n assert self._get_uses_interpolation(parameter_handler)\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = BondKey(\n atom_indices=key,\n bond_order=fractional_bond_order,\n )\n\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=valence_terms,\n )",
"def search_key_full_eq(self, key):\n for i in xrange(len(self.keys)):\n flag = 0\n for indx in range(4):\n if cmp(self.keys[i][indx],key[indx]) == 0:\n flag = 0\n continue\n if cmp(key[indx],\"*\") == 0:\n print \" visited internal! ==>\", self.keys[i]\n return self.pointers[i]\n elif self.keys[i][indx] > key[indx]:\n flag = 1\n else:\n flag = 2\n break \n # print \"searching %s:%s:%d\" %(str(self.keys[i]),str(key),flag)\n if flag == 1:\n if i > 0:\n print \" visited internal ==>\", self.keys[i] \n return self.pointers[i]\n else:\n print \" visited internal ==>\", self.keys[0] \n return self.pointers[0]\n elif flag == 0:\n print \" visited internals ==>\", self.keys[i]\n return self.pointers[i]\n print \" visited internalsed ==>\", self.keys[-1] \n return self.pointers[-1]",
"def test_audio_features(self):\n\n # 1ehPJRt49h6N0LoryqKZXq, 8737: How Far I'll Go (Alessia Cara Version) by Alessia Cara\n # 2fGFaTDbE8aS4f31fM0XE4, 5037: Pop 101 (feat. Anami Vice) by Marianas Trench\n targets = {8737: {'danceability': 0.317,\n 'energy': 0.562,\n 'key': 9,\n 'loudness': -9.609,\n 'mode': 1,\n 'speechiness': 0.395,\n 'acousticness': 0.124,\n 'instrumentalness': 0.000144,\n 'liveness': 0.0667,\n 'valence': 0.127,\n 'tempo': 181.100,\n 'duration_ms': 175507,\n 'time_signature': 4,\n },\n 5037: {'danceability': 0.756,\n 'energy': 0.658,\n 'key': 11,\n 'loudness': -6.128,\n 'mode': 0,\n 'speechiness': 0.202,\n 'acousticness': 0.0581,\n 'instrumentalness': 0,\n 'liveness': 0.0674,\n 'valence': 0.640,\n 'tempo': 120.018,\n 'duration_ms': 247829,\n 'time_signature': 4,\n },\n }\n\n results = {track.i_id: track for track in self.tracks if track.i_id in targets}\n\n for target, expecteds in targets.iteritems():\n result = results[target]\n for key, expected in expecteds.iteritems():\n self.assertEqual(result.__getattr__(key), expected)",
"def store_matches(\n self,\n parameter_handler: ImproperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n parameter_handler._assert_correct_connectivity(\n val,\n [\n (0, 1),\n (1, 2),\n (1, 3),\n ],\n )\n n_terms = len(val.parameter_type.k)\n for n in range(n_terms):\n smirks = val.parameter_type.smirks\n non_central_indices = [key[0], key[2], key[3]]\n\n for permuted_key in [\n (\n non_central_indices[i],\n non_central_indices[j],\n non_central_indices[k],\n )\n for (i, j, k) in [(0, 1, 2), (1, 2, 0), (2, 0, 1)]\n ]:\n topology_key = ImproperTorsionKey(\n atom_indices=(key[1], *permuted_key),\n mult=n,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ImproperTorsions\",\n )\n self.key_map[topology_key] = potential_key",
"def testTrackDict1(self):\n\n goodTrackDict = {\n \"number\": \"1\", \"uid\": \"1493619965\",\n \"codec_id\": \"V_MPEG4/ISO/AVC\", \"codec_private_length\": \"44\",\n \"codec_private_data\": \"014d4028ffe1001c80\", \"language\": \"eng\",\n \"pixel_dimensions\": \"1920x1080\", \"display_dimensions\": \"1920x1080\",\n \"default_track\": \"1\", \"forced_track\": \"0\", \"enabled_track\": \"1\",\n \"packetizer\": \"mpeg4_p10_video\", \"default_duration\": \"41708332\",\n \"content_encoding_algorithms\": \"3\"\n }\n\n trackLine = _buildTrackLine(0, 'video', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )",
"def test_sample_mapped_keys(self):\r\n\r\n # With num_coverage=1 only the keys will be sampled\r\n actual = sample_mapped_keys(self.test_map, 1)\r\n self.assertEqual(actual, {'1': ['1'], '2': ['2']})\r\n\r\n actual = sample_mapped_keys(self.test_map, 3)\r\n for key in actual.keys():\r\n # check number of sampled keys\r\n self.assertEqual(3, len(actual[key]))\r\n for x in actual[key]:\r\n # check that sampled key is in the full list\r\n correct = list(self.test_map[key])\r\n correct.append(key)\r\n self.assertTrue(x in correct)",
"def test_match_ordered(self):\n first = dict(\n a=1,\n b=2,\n )\n\n second = OrderedDict(\n b=2,\n a=1,\n )\n\n check_keys_match_recursive(first, second, [])",
"def keysAll():",
"def matches(hand):\n return list(sorted(match_iter(hand), reverse=True))",
"def _match_tracks(artist, title, mb_tracks):\n # pylint: disable=R0914\n dbg(\"artists is %s\", artist)\n dbg(\"title is %s\", title)\n title_artist_str = c.g + title + c.w, c.g + artist + c.w\n xprint(\"\\nSearching for %s by %s\\n\\n\" % title_artist_str)\n\n def dtime(x):\n \"\"\" Format time to M:S. \"\"\"\n return time.strftime('%M:%S', time.gmtime(int(x)))\n\n # do matching\n for track in mb_tracks:\n ttitle = track['title']\n length = track['length']\n xprint(\"Search : %s%s - %s%s - %s\" % (c.y, artist, ttitle, c.w,\n dtime(length)))\n q = \"%s %s\" % (artist, ttitle)\n w = q = ttitle if artist == \"Various Artists\" else q\n query = generate_search_qs(w, 0, result_count=50)\n dbg(query)\n have_results = _search(q, query, splash=False, pre_load=False)\n\n if not have_results:\n xprint(c.r + \"Nothing matched :(\\n\" + c.w)\n continue\n\n results = g.model.songs\n s, score = _best_song_match(results, artist + \" \" + ttitle, length)\n cc = c.g if score > 85 else c.y\n cc = c.r if score < 75 else cc\n xprint(\"Matched: %s%s%s - %s \\n[%sMatch confidence: \"\n \"%s%s]\\n\" % (c.y, s.title, c.w, fmt_time(s.length),\n cc, score, c.w))\n yield s",
"def _keys_in_sorted(move):\n return (move.picking_id.id, move.product_id.responsible_id.id)",
"def testSortedNotes(self):\n for simple_score in self.simple_scores.values():\n notes = simple_score.sorted_notes\n assert all(notes[i].start_time <= notes[i + 1].start_time\n for i in range(len(notes) - 1))",
"def testTrackDict2(self):\n\n goodTrackDict = {\n \"number\": \"2\", \"uid\": \"3442966448\", \"codec_id\": \"A_VORBIS\",\n \"codec_private_length\": \"4412\", \"codec_private_data\": \"020808\",\n \"language\": \"jpn\", \"track_name\": \"2ch\\\\sVorbis\",\n \"default_track\": \"1\", \"forced_track\": \"0\", \"enabled_track\": \"1\",\n \"audio_sampling_frequency\": \"48000\", \"audio_channels\": \"2\"\n }\n\n trackLine = _buildTrackLine(1, 'audio', goodTrackDict)\n\n trackID, trackType, trackDict = tools._trackInfo(trackLine)\n\n self.assertEqual(\n goodTrackDict,\n trackDict\n )",
"def keys(targets):",
"def check_keys(self):",
"def test_match_table_post(self):\n pass",
"def getPossibleMatchesList(self):\n return [p for p in self._patterns if p.startswith(self._keyCode)]",
"def store_matches(\n self,\n parameter_handler: ProperTorsionHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n self.key_map: dict[ProperTorsionKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n n_terms = len(val.parameter_type.phase)\n for n in range(n_terms):\n smirks = param.smirks\n if param.k_bondorder:\n # The relevant bond order is that of the _central_ bond in the torsion\n bond = topology.get_bond_between(key[1], key[2])\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = ProperTorsionKey(\n atom_indices=key,\n mult=n,\n bond_order=fractional_bond_order,\n )\n potential_key = PotentialKey(\n id=smirks,\n mult=n,\n associated_handler=\"ProperTorsions\",\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=list(topology.propers),\n )"
] | [
"0.72052854",
"0.6073225",
"0.5684413",
"0.5646537",
"0.55177075",
"0.54479295",
"0.54022795",
"0.5286637",
"0.5255114",
"0.52513975",
"0.5236649",
"0.52352864",
"0.5173165",
"0.5170619",
"0.5168231",
"0.51618785",
"0.51518595",
"0.51467997",
"0.5143868",
"0.51270205",
"0.5095624",
"0.5080584",
"0.5073778",
"0.5068419",
"0.5047828",
"0.50372577",
"0.5029652",
"0.5017005",
"0.5014709",
"0.5010062"
] | 0.6227405 | 1 |
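The row above pairs the docstring "keys_to_track order is important!" with the constructor of a small key-matching tracker: one empty lookup dict per tracked key, in precedence order. A minimal, self-contained Python 3 sketch of that layout (the class and attribute names follow the row's document; the demo key names are illustrative assumptions, not part of the dataset):

```python
# Minimal sketch of the tracker layout described in the row above.
# KeyMatcher, keys_to_track and tracker follow the row's document;
# the demo keys ("ID", "email", "phone") are illustrative only.

class KeyMatcher:
    def __init__(self, keys_to_track):
        # Order matters: earlier keys take precedence when matching.
        self.keys_to_track = keys_to_track
        # One lookup dict per tracked key: {key_name: {value: obj}}
        self.tracker = {key: {} for key in keys_to_track}


if __name__ == "__main__":
    matcher = KeyMatcher(["ID", "email", "phone"])
    print(matcher.tracker)   # {'ID': {}, 'email': {}, 'phone': {}}
```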
Add obj as a match for match_dict values. Checks to make sure match_dict keys are valid. | def add(self, obj, match_dict):
for match_key in match_dict.keys():
assert match_key in self.keys_to_track
for key_to_track in self.keys_to_track:
if match_dict.has_key(key_to_track):
match_val = match_dict[key_to_track]
if match_val is None or match_val == '':
pass
else:
self.tracker[key_to_track][match_val] = obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)",
"def add_match(self, event):\n event = copy.deepcopy(event)\n # Convert datetime's back to timestamps\n ts = self.rules.get(\"timestamp_field\")\n if ts in event:\n event[ts] = dt_to_ts(event[ts])\n\n self.matches.append(event)",
"def addMatch(self, id, match):\n self._matches[id] = match",
"def add_matching(self, matching: list):\n self.matching = matching",
"def filter_matches_add(self, key, value):\n\t\tif key in self.filter_matches.keys():\n\t\t\tself.filter_matches[key].append(value)\n\t\telse:\n\t\t\tself.filter_matches[key]=[value]",
"def add_match(self, match):\n self.matches.append(match)",
"def match(self, match_dict):\r\n for match_key in match_dict.keys():\r\n assert match_key in self.keys_to_track\r\n\r\n for key_to_track in self.keys_to_track:\r\n if match_dict.has_key(key_to_track):\r\n match_val = match_dict[key_to_track]\r\n if self.tracker[key_to_track].has_key(match_val):\r\n return self.tracker[key_to_track][match_val]\r\n return None",
"def match(self, data_instance: Dict[str, Any]) -> bool:",
"def add_match(self, match):\n\n # update cache results with game result\n if self.cached_results:\n results = json.loads(self.cached_results)\n else:\n results = []\n\n winner = match.winner == self\n opponent = match.loser if winner else match.winner\n new_rating = match.winner_rating_after if winner else \\\n match.loser_rating_after\n\n results.append({\n 'winner': winner,\n 'opponent_name': opponent.name,\n 'played_time': str(match.played_time),\n 'played_timestamp': get_timestamp(match.played_time)\n })\n self.cached_results = json.dumps(results[-CACHED_RATING_LIMIT:])\n\n # update player with new rating\n self.update_rating(new_rating, match)\n\n # save the player in the database\n self.save()",
"def __addMatch(db, match, oldRatings, newRatings):\n c = db.cursor()\n player1EloChange = newRatings[0] - oldRatings[0]\n player2EloChange = newRatings[1] - oldRatings[1]\n\n player1 = __playerCache[match['player1-id']]\n player2 = __playerCache[match['player2-id']]\n winner = __playerCache[match['winner-id']]\n\n c.execute(\"INSERT INTO matches \"\n \"VALUES(%s,%s,'%s','%s','%s','%s',%s,%s,%s,%s)\" %\n (match['id'], match['tournament-id'],\n match['updated-at'],\n player1['email-hash'], player2['email-hash'],\n winner['email-hash'],\n oldRatings[0], oldRatings[1],\n player1EloChange, player2EloChange))",
"def add_match(self, f, exclusions=None, **match_kwargs):\n assert not self._checked, 'can\\'t add after matchlist has been checked'\n\n if not match_kwargs: # Do nothing if no match_kwargs.\n return f\n\n self._verify_match_kwargs(match_kwargs, exclusions)\n self.matchers.append((match_kwargs, exclusions, f))\n return f",
"def _add_object(self, object_dict):\n # Attempt to map the object first. This will raise an\n # ItemExistsError if a named object of the same type already\n # exists.\n self._add_object_to_map(self.append_key, object_dict)\n\n # Add the object to the end of the model.\n # TODO: which objects need added to the beginning?\n self.model_dict[self.append_key] = object_dict\n\n # Update append key.\n self._update_append_key()",
"def add_ignored_match(self, secret: dict) -> None:\n\n matches_ignore = [\n match[\"match\"] if isinstance(match, dict) else match\n for match in self.matches_ignore\n ]\n if secret[\"match\"] not in matches_ignore:\n self.matches_ignore.append(secret)\n else:\n for match in self.matches_ignore:\n if (\n isinstance(match, dict)\n and match[\"match\"] == secret[\"match\"]\n and match[\"name\"] == \"\"\n ):\n match.update({\"name\": secret[\"name\"]})",
"def __add__(aMatchList, bMatchList):\n for id in bMatchList._matches.keys():\n aMatchList.addMatch(id, bMatchList._matches[id])\n return aMatchList",
"def add(dictionary, iterable, pattern, must_match=True, add_only_keys=None, ignore_errors=False):\n matcher = re.compile(pattern)\n for line in iterable:\n match = matcher.match(line)\n if not match:\n if must_match:\n raise ConfigurationError(\"Cannot match key-value from '%s' with pattern '%s'. Must match is set to true\" % (line, pattern))\n else:\n continue\n key = match.group(1).strip()\n try:\n value = match.group(2).strip()\n value = json.loads(value) if len(value) > 0 else None\n if add_only_keys is None or key in add_only_keys:\n dictionary[key] = value\n logging.debug(\"Key-value item (%s=%s) has been parsed and added to dictionary\", key, str(value))\n except ValueError as err:\n if not ignore_errors:\n raise ConfigurationError(\"Cannot parse JSON string '%s' with key '%s' (key-value definition: '%s'). Error is %s\" % (value, key, line, str(err)))",
"def _add_object_to_map(self, model_key, object_dict):\n # Grab reference to the object sub-dict.\n object_map = self.model_map['object']\n\n # Get type of object.\n obj_type = object_dict['object']\n\n # Define key object pair\n key_obj = [model_key, object_dict]\n\n # If this type isn't in the map, add it. NOTE: this can lead to\n # empty entries if the object isn't named.\n if obj_type not in object_map:\n object_map[obj_type] = {}\n\n try:\n # Never try to map an already existing named object.\n if object_dict['name'] in object_map[obj_type]:\n s = '{} already exists in the {} map!'\n raise ItemExistsError(s.format(object_dict['name'], obj_type))\n\n except KeyError:\n # Unnamed object. Add it to the unnamed list.\n self.model_map['object_unnamed'].append(key_obj)\n\n else:\n # Named object, map it.\n object_map[obj_type][object_dict['name']] = key_obj\n\n # No need to return; we're directly updating self.model_map",
"def add_match_result(self, variable, match_result):\n if isinstance(match_result, MatchResult):\n self._match_result_dict[variable] = match_result\n else:\n raise ValueError(\"Input must be a valid TimeSeries object\")",
"def _add_found_values(self, transform_dict, transform_key,\n lookup_key, lookup_dict):\n try:\n if self._verify_key_exists(lookup_key, lookup_dict):\n transform_dict[transform_key] = \\\n ''.join(nested_lookup(lookup_key, lookup_dict))\n except TypeError:\n pass\n if isinstance(lookup_key, list):\n transform_dict[transform_key] = \\\n ''.join(self._key_list_search(lookup_key, lookup_dict))\n return transform_dict",
"def match_info(info_dict):\n return True",
"def test_addEntryByDict(self):\n self.g.entryFormat = ['term', 'tags', 'value']\n b = self.g.add_entry({'term': 'foo', 'tags': 'a', 'value': '1'})\n self.assertTrue(b)",
"def add_rule_to_dict(rule_dict, lhs, rhs):\n if rhs not in rule_dict:\n rule_dict[rhs] = list()\n rule_dict[rhs].append(lhs) \n return rule_dict",
"def findMatches3(personDict,matches,skepticalMatches,additionalMatches,personDict2):\n dictConsidered = personDict['ALIAS']\n for alias in dictConsidered:\n if alias == \"\":\n continue\n pairs = itertools.combinations(dictConsidered[alias],2)\n for p in pairs:\n k = tuple(sorted(p))\n if (k not in matches) and (k not in skepticalMatches) and (k not in additionalMatches):\n info1 = personDict['EnterpriseID'][p[0]]\n info2 = personDict['EnterpriseID'][p[1]]\n \n info1b = personDict2['EnterpriseID'][p[0]]\n info2b = personDict2['EnterpriseID'][p[1]]\n score = getScorePair(info1b,info2b)\n if score>=7:\n additionalMatches[k] = score\n\n return additionalMatches",
"def test_updatewithdictionarybycomparingdictionaries(self):\n s1 = Square(10, 2, 1, 9)\n s1_dictionary = s1.to_dictionary()\n s2 = Square(1, 1)\n s2.update(**s1_dictionary)\n self.assertEqual(s1.__dict__, s2.__dict__)",
"def matches(self, matches):\n\n self._matches = matches",
"def matches(self, matches):\n\n self._matches = matches",
"def test_addDict(self):\n lidi = []\n lidi.append({'term': 'foo', 'tags': 'a', 'value': '1'})\n lidi.append({'term': 'bar', 'tags': 'a, b', 'value': '2'})\n lidi.append({'term': 'gnark', 'tags': 'a, c', 'value': '3'})\n self.g.add_dict(lidi)",
"def insert_match(self, gameid):\n if Match.query.filter(Match.gameid == gameid).first():\n self.logger.info(\"Match {} already exists in the DB\".format(gameid))\n return True\n match_json = self.rc.get_match(gameid)\n if not match_json:\n self.logger.warning(\"API did not return data for this gameid: {}\".format(gameid))\n return False\n match_json = self.lower_keys(match_json)\n # Get column names\n match_columns = Match.__table__.columns.keys()\n # Remove all k:v pairs that do not match column names\n to_del = []\n for k, v in match_json.items():\n if k not in match_columns:\n to_del.append(k)\n # del match_json[k]\n for k in to_del:\n del match_json[k]\n match = Match(**match_json)\n match.gamecreation = datetime.utcfromtimestamp(match.gamecreation // 1000)\n self.db.session.add(match)\n self.db.session.commit()\n return True",
"def replace(self, matchobj):\n key = matchobj.group(1)\n if self.dict.has_key(key):\n return self.dict[key]\n else:\n return ''",
"def addtwodimdict(self, thedict, key_a, key_b, val):\r\n if key_a in thedict:\r\n thedict[key_a].update({key_b: val})\r\n else:\r\n thedict.update({key_a: {key_b: val}})",
"def add(self, obj: model.IdentifiableArtefact):\n for field, field_info in direct_fields(self.__class__).items():\n # NB for some reason mypy complains here, but not in __contains__(), below\n if isinstance(\n obj, get_args(field_info.outer_type_)[1], # type: ignore [attr-defined]\n ):\n getattr(self, field)[obj.id] = obj\n return\n raise TypeError(type(obj))"
] | [
"0.72251153",
"0.6223135",
"0.612522",
"0.6043846",
"0.6039093",
"0.5865672",
"0.5826964",
"0.5673978",
"0.5657533",
"0.56395197",
"0.5595575",
"0.55939347",
"0.5582442",
"0.55700904",
"0.55490994",
"0.54979956",
"0.5410391",
"0.5368418",
"0.536791",
"0.535772",
"0.5288205",
"0.5236559",
"0.52085674",
"0.5194137",
"0.5194137",
"0.51784515",
"0.5150603",
"0.51416355",
"0.5136652",
"0.51329386"
] | 0.84316427 | 0 |
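The row above documents the tracker's add() method; the stored sample itself is Python 2 (it relies on dict.has_key). A standalone Python 3 sketch of the same registration logic over the nested-dict layout — the function name add_match and the demo record are the editor's illustrative assumptions, not part of the dataset:

```python
# Python 3 rendering of the add() logic in the row above, written as a
# plain function over the nested-dict layout so the snippet stands alone.

def add_match(tracker, keys_to_track, obj, match_dict):
    # Reject keys that are not being tracked.
    for match_key in match_dict:
        assert match_key in keys_to_track
    # Register obj under every non-empty value it provides.
    for key in keys_to_track:
        value = match_dict.get(key)
        if value not in (None, ""):
            tracker[key][value] = obj


if __name__ == "__main__":
    keys = ["ID", "email"]
    tracker = {k: {} for k in keys}
    record = {"name": "Alice"}
    add_match(tracker, keys, record, {"ID": "A233", "email": ""})
    print(tracker)   # {'ID': {'A233': {'name': 'Alice'}}, 'email': {}}
```

Empty strings and None are skipped, so a record with a missing identifier never claims a slot in that key's lookup table.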
Find a match using match_dict. Returns None if there is no match. Checks to make sure match_dict keys are valid. | def match(self, match_dict):
for match_key in match_dict.keys():
assert match_key in self.keys_to_track
for key_to_track in self.keys_to_track:
if match_dict.has_key(key_to_track):
match_val = match_dict[key_to_track]
if self.tracker[key_to_track].has_key(match_val):
return self.tracker[key_to_track][match_val]
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _find_match(needle: dict, haystack: list, keys: list):\n for item in haystack:\n for key in keys:\n if item.get(key) != needle[key]:\n break\n else:\n return item\n return None",
"def dict_match(d, key, default=None):\n\n if key in d and \"[\" not in key:\n return d[key]\n else:\n for pattern, value in iteritems(d):\n if fnmatchcase(key, pattern):\n return value\n return default",
"def find_match(name, dictionary):\n if name == '':\n # raise \"Didn't find name\"\n return False\n search_name = (' ').join(name.split(' ')[:-1])\n if search_name in dictionary:\n return search_name\n else:\n return find_match(search_name, dictionary)",
"async def get_match_from_id(match_id: int) -> Match or None:\n if match_id is None:\n return None\n\n if match_id in match_library:\n return match_library[match_id]\n\n raw_data = await matchdb.get_raw_match_data(match_id)\n if raw_data is not None:\n return await make_match_from_raw_db_data(raw_data)\n else:\n return None",
"def find(self, location, dictionary=None, key_index=1):\n\n # dictionary is self object if no dictionary is provided\n if not dictionary:\n dictionary = self\n\n # take first value field to be found\n value = normalize_name(location[key_index])\n\n # extract matched value from\n value_matched = process.extractOne(value, dictionary.keys())\n\n if value_matched and value_matched[1] > self.THRESHOLD_RATIO:\n key = value_matched[0]\n\n # if there are more values to evaluate, call recursively\n if len(location) > key_index + 1:\n # print value_matched[1],\n return self.find(location, dictionary[key], key_index + 1)\n\n else:\n # print value_matched[1],\n return dictionary[key]\n\n else:\n return None",
"def search(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default",
"def get_match_result(self, variable):\n try:\n return self._match_result_dict[variable]\n except KeyError:\n return None",
"def find(name, *dicts):\n for d in dicts:\n if type(d) == str:\n return d\n elif name in d and d[name] is not None:\n return d[name]\n\n return None",
"def find_in_dict(obj, key):\n if key in obj:\n return obj[key]\n for k, v in obj.items():\n if isinstance(v,dict):\n item = find_in_dict(v, key)\n if item is not None:\n return item",
"def _get_value(match_entry: Dict, path0: str) -> any:\n if path0 is None:\n current_el = match_entry\n else:\n path = path0.split('/')\n current_el = match_entry\n for p in path:\n if current_el is None:\n break\n current_el = current_el.get(p)\n return current_el",
"def find(self, entry_id: str, match: callable = operator.contains) -> Any:\n try:\n return next(self.find_all(entry_id, match))\n except StopIteration as error:\n raise errors.NoEntryFound(f'No entries found which match {entry_id}') from error",
"def find_value(dic, key):\n return dic[key]",
"def search_value(d, key, default=None):\n stack = [iter(d.items())]\n while stack:\n for k, v in stack[-1]:\n if isinstance(v, dict):\n stack.append(iter(v.items()))\n break\n elif k == key:\n return v\n else:\n stack.pop()\n return default",
"def get_match_hash_key(hashkey, table):\n if hashkey in table:\n # assume that this has at least one flow entry\n b = convert_ip_to_int(table[hashkey][0].nw_src)\n a = convert_ip_to_int(table[hashkey][0].nw_dst)\n match_hash = (a * a + a + b) if a >= b else (a + b * b)\n if match_hash in table:\n return match_hash\n else:\n return None\n else:\n return None",
"def dict_find_name(some_dict: Dict[str, Referent], path: List[str]) -> Result:\n if path:\n head, *tail = path\n try:\n return NameContainer.dict_find_name(\n cast(Dict[str, Referent], some_dict[head]),\n tail)\n except KeyError:\n NameContainer.logger.debug(f\"{head!r} not found in {some_dict.keys()}\")\n raise NameContainer.NotFound(path)\n else:\n return cast(Result, some_dict)",
"def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None",
"def _parse_line(line):\n\n for key, rx in rx_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n # if there are no matches\n return None, None",
"def _search_list_of_dictionaries(key, value, list_of_dictionaries):\n\n for element in list_of_dictionaries:\n if element.get(key) == value:\n return element\n return None",
"def findMatcher(self, ch):\n for m in self.matchers:\n if m.match(ch):\n return m\n return None",
"def match(id=0):\n match = Match.query.get(id)\n if match is not None:\n return render_template('match.html', match=Match.query.get(id))\n abort(404)",
"def _find_by_key(self, key, find):\n index = hashId(key, self.capacity) # Get the index/ bucket based on hash code of the key\n \n hash_table_cell = self._entry[index]\n found_item = None\n for item in hash_table_cell: #Iterrate the entry array and check the key is matching and if key is same than get the value\n if item[0] == key:\n found_item = item\n break\n\n return find(found_item, hash_table_cell)",
"def lookup(my_dict, my_key, default_value=None):\n if my_key in my_dict:\n return my_dict[my_key]\n else:\n return default_value",
"def find(self, answer):\n self._validate(answer)\n\n for index, existing in enumerate(self.answers):\n if answer.matches_dict(existing):\n return index\n\n return None",
"def parse_line(keyword_dict, line):\n for key, rx in keyword_dict.items():\n match = rx.search(line)\n if match:\n return key, match\n\n return None, None",
"def get_match_by_id(self, match_id):\n request = rq.get(\n 'https://{region}.api.pvp.net/api/lol/{region}/v{version}/match/{id}?api_key={api_key}'.format(\n region=self.region,\n version=api_version['match'],\n id=match_id,\n api_key=API_KEY\n )\n )\n print(self.region, request)\n check_response(request)\n return request.json()",
"def findKey(value,dict):\n for key, val in dict.iteritems():\n if value in val:\n return key",
"def dict_search_recursive(d, k):\n # FIXME: make it generic recursive search over nested graphs and move to smp_base\n\n # print \"#\" * 80\n # print \"searching k = %s \" % (k,),\n if k in d:\n # print \"found k = %s, params = %s\" % (k, d[k]['params'].keys())\n return d[k]\n else:\n # print \"d.keys()\", d.keys()\n for k_, v_ in list(d.items()):\n # if v_[\n if 'graph' in v_['params']: # or v_['params'].has_key('subgraph'):\n # print \"k_\", k_, \"v_\", v_['params'].keys()\n return dict_search_recursive(v_['params']['graph'], k)\n # None found\n return None",
"def find_item(hash_table_ref, key_sig):\n if not hasattr(key_sig, '__iter___'):\n key_sig = [key_sig]\n\n last_item = key_sig[len(key_sig) - 1]\n for key in key_sig:\n if key != last_item:\n if hasattr(hash_table_ref, 'keys') and key in hash_table_ref:\n hash_table_ref = hash_table_ref[key]\n else:\n # Item not found\n return None\n else:\n if hasattr(hash_table_ref, 'keys') and \\\n key in hash_table_ref:\n return hash_table_ref[key]\n # Item not found\n return None",
"def findItem(obj, key):\n if key in obj:\n return obj[key]\n if type(obj) == str:\n return None\n for k, v in obj.items():\n if isinstance(v, dict):\n item = findItem(v, key)\n if item is not None:\n return item\n elif isinstance(v, list):\n for list_item in v:\n item = findItem(list_item, key)\n if item is not None:\n return item",
"def find_one_bykey(cls, keydict, defaultval = None):\n return cls.dbm().modelclass_find_one_bykey(cls, keydict, defaultval)"
] | [
"0.6650081",
"0.64368933",
"0.62785393",
"0.6271919",
"0.606207",
"0.60218126",
"0.6009384",
"0.59157956",
"0.5891576",
"0.586902",
"0.5831622",
"0.58111554",
"0.58071977",
"0.580484",
"0.57333195",
"0.5707258",
"0.5707258",
"0.57004094",
"0.5668807",
"0.561176",
"0.5579269",
"0.5545683",
"0.5541376",
"0.5524781",
"0.5491723",
"0.5485185",
"0.5477593",
"0.5477031",
"0.5449981",
"0.54199696"
] | 0.8009377 | 0 |
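The row above documents the lookup side: the keys of match_dict are validated, then the tracked keys are tested in keys_to_track order and the first value already registered in the tracker wins; None means no match. A standalone Python 3 sketch with the same first-match-wins behaviour (find_match and the demo data are illustrative assumptions, not part of the dataset):

```python
# Python 3 rendering of the match() lookup from the row above: keys are
# tried in keys_to_track order and the first hit is returned.

def find_match(tracker, keys_to_track, match_dict):
    for match_key in match_dict:
        assert match_key in keys_to_track
    for key in keys_to_track:
        if key in match_dict:
            value = match_dict[key]
            if value in tracker[key]:
                return tracker[key][value]
    return None


if __name__ == "__main__":
    keys = ["ID", "email"]
    tracker = {
        "ID": {"A233": {"name": "Alice"}},
        "email": {"bob@example.com": {"name": "Bob"}},
    }
    # "ID" is listed first, so it is checked before "email".
    print(find_match(tracker, keys, {"ID": "A233", "email": "bob@example.com"}))
    # -> {'name': 'Alice'}
    print(find_match(tracker, keys, {"email": "carol@example.com"}))
    # -> None
```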
Utility function to populate key_matcher from self.records. | def _add_matches(self):
for record in self.records:
match_dict={key_to_track: record.get(key_to_track)
for key_to_track in self.key_matcher.keys()}
self.key_matcher.add(obj=record,
match_dict=match_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def match_source_key(self, match):\n raise NotImplementedError",
"def __init__(self):\n self.key_to_record = {}\n self.mutation_to_key = {}\n self._innovation_key_generator = count(0)",
"def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", ())",
"def init_record_fields(self, run_record_key, record_fields):\n\n record_fields_grp = self.settings_grp[RECORD_FIELDS]\n\n # make a dataset for the sparse fields allowed. this requires\n # a 'special' datatype for variable length strings. This is\n # supported by HDF5 but not numpy.\n vlen_str_dt = h5py.special_dtype(vlen=str)\n\n # create the dataset with the strings of the fields which are records\n record_group_fields_ds = record_fields_grp.create_dataset(run_record_key,\n (len(record_fields),),\n dtype=vlen_str_dt,\n maxshape=(None,))\n\n # set the flags\n for i, record_field in enumerate(record_fields):\n record_group_fields_ds[i] = record_field",
"def initialize(self, keys: List[str]):",
"def _make_match_key(self, firstname, lastname):\n return \"{}::{}\".format(firstname.lower().strip(), lastname.lower().strip())",
"def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))",
"def init_by_keys(cls, **query):\n raise NotImplementedError()",
"def gen_keys():",
"def __init__(self, matcher, generate):\n self.matcher = matcher\n self._generate = generate",
"def __init__(self, key):\n self.key = [int_mapping(k) for k in key]",
"def buildFromRecords(self, records):\n probes = {}\n for record in records:\n fields = {}\n for field in record.split(self.FIELD_DELIMITER):\n index = field.find(self.KEY_VALUE_DELIMITER)\n if index == -1 or len(field) < (index+1):\n raise InvariantViloation('detected invalid probe record in app info file - {}'.format(record))\n fields.update({field[:index]:field[index+1:]})\n if fields:\n try:\n fields[self.FIELD_FILE] = self.trimWorkspace(fields[self.FIELD_FILE], self.workspace)\n probes.update({\n fields[self.FIELD_RECORDER_RETURN_SITE] : AnchoredProbe(\n fields[self.FIELD_NAME], fields[self.FIELD_FILE], fields[self.FIELD_LINE],\n fields[self.FIELD_ATTRIBUTES], fields[self.FIELD_STATUS] == self.PROBE_STATUS_ENABLED,\n fields[self.FIELD_NAME]\n )\n })\n except KeyError as error:\n raise InvariantViloation('detected record missing field {} - \\n{}\\n{}'.format(error, record, fields))\n return probes",
"def train_test_split(record_dictionary, ratio=.5):\n\n num_training_records = int(len(record_dictionary) * ratio)\n\n keys = list(record_dictionary.keys())\n\n training_records = np.random.choice(\n keys, num_training_records, replace=False)\n testing_records = [key for key in keys if key not in training_records]\n\n training_dictionary = {\n record: record_dictionary[record]\n for record in training_records\n }\n testing_dictionary = {\n record: record_dictionary[record]\n for record in testing_records\n }\n\n return training_dictionary, testing_dictionary",
"def test_invalid_key_gen(self):\r\n expected = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'}}\r\n data = {1: {'ID': 'A233', 'Gender': 'M', 'Age': '22', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}, 2: {'ID': 'A244', 'Gender': 'M', 'Age': '30', 'Sales': '666',\r\n 'BMI': 'Underweight', 'Salary': '23', 'Birthday': '05-05-1988'},\r\n 3: {'ID': 'A253', 'Gender': 'M', 'Age': '35', 'Sales': '456', 'BMI': 'Obesity', 'Salary': '23',\r\n 'Birthday': '01-08-1983'}, 4: {'ID': 'A262', 'Gender': 'M', 'Age': '24', 'Sales': '999',\r\n 'BMI': 'Normal', 'Salary': '23', 'Birthday': '24-05-1993'},\r\n 5: {'ID': 'A233', 'Ge1nder': 'F', 'Age': '62', 'Sales': '245', 'BMI': 'Normal', 'Salary': '23',\r\n 'Birthday': '24-06-1995'}}\r\n result = Validator.save_dict(data)\r\n self.assertDictEqual(expected, result)",
"def harmonize_keys(self):\n self._data.key_regex_replacements = _key_regex_replacements\n self._data.key_replacements = _key_replacements",
"def _make_key(self, record_dict: Dict[str, Any]) -> int:\n return self._keys.setdefault(frozenset(record_dict.keys()), len(self._keys))",
"def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: \"Topology\",\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[\n Union[TopologyKey, LibraryChargeTopologyKey],\n PotentialKey,\n ] = dict()\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n topology_key = TopologyKey(atom_indices=key)\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n )\n self.key_map[topology_key] = potential_key\n\n if self.__class__.__name__ in [\n \"SMIRNOFFBondCollection\",\n \"SMIRNOFFAngleCollection\",\n ]:\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n assigned_terms=matches,\n topology=topology,\n valence_terms=valence_terms,\n )",
"def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')",
"def __init__(self):\n self._keys = []\n self._sortKeys = []",
"def key_lookup_batch(self, batchiter):\n pass",
"def create_matcher(self):\n self.matcher = None\n if \"matcher\" in self.config:\n self.matcher = matcher.Matcher(self.config[\"matcher\"])\n else:\n self.matcher = matcher.TrueMatcher()\n \n self.use_fields_for_id = []\n if \"matcherfield\" in self.config:\n self.use_fields_for_id = self.config[\"matcherfield\"].split(\",\")\n \n if \"clear\" in self.config:\n self.clear_matcher = matcher.Matcher(self.config[\"clear\"])\n self.autoclear = self.auto_acknowledge\n else:\n self.clear_matcher = matcher.FalseMatcher()\n self.autoclear = False",
"def store_matches(\n self,\n parameter_handler: ParameterHandler,\n topology: Topology,\n ) -> None:\n if self.key_map:\n # TODO: Should the key_map always be reset, or should we be able to partially\n # update it? Also Note the duplicated code in the child classes\n self.key_map: dict[BondKey, PotentialKey] = dict() # type: ignore[assignment]\n matches = parameter_handler.find_matches(topology)\n for key, val in matches.items():\n param = val.parameter_type\n if param.k_bondorder or param.length_bondorder:\n bond = topology.get_bond_between(*key)\n fractional_bond_order = bond.fractional_bond_order\n if not fractional_bond_order:\n assert self._get_uses_interpolation(parameter_handler)\n raise RuntimeError(\n \"Bond orders should already be assigned at this point\",\n )\n else:\n fractional_bond_order = None\n topology_key = BondKey(\n atom_indices=key,\n bond_order=fractional_bond_order,\n )\n\n potential_key = PotentialKey(\n id=val.parameter_type.smirks,\n associated_handler=parameter_handler.TAGNAME,\n bond_order=fractional_bond_order,\n )\n self.key_map[topology_key] = potential_key\n\n valence_terms = self.valence_terms(topology)\n\n _check_all_valence_terms_assigned(\n handler=parameter_handler,\n topology=topology,\n assigned_terms=matches,\n valence_terms=valence_terms,\n )",
"def lookup(self, key):",
"def __init__(self):\n super(KeyIterator, self).__init__()\n self.iterator = self.ValueIterator()",
"def clean_up(self) -> None:\n self.single_device_matches = collections.defaultdict(\n lambda: collections.defaultdict(list)\n )",
"def __init__(self, compound_matcher):\n self._matcher = compound_matcher",
"def find_records(self, check, keys=None):\n matches = self._match(check)\n if keys:\n return [self._extract_subdict(rec, keys) for rec in matches]\n else:\n return matches",
"def __init__(self):\n # map of (key, index in list)\n self.map = dict()\n \n # list of keys for random selection\n self.keys = []",
"def test_keys_eq(self):\n self.assertListEqual(self.result, self.expected)",
"def _rebuild_comparedict(self,\n result,\n rewrapped_columns,\n columns,\n rewrapped_keys,\n keys,\n missing_col):\n normalize = lambda x: x if (isinstance(x, str) or not x) else tuple(x)\n rewrapped_columns = normalize(rewrapped_columns)\n rewrapped_keys = normalize(rewrapped_keys)\n columns = normalize(columns)\n keys = normalize(keys)\n\n if rewrapped_keys == keys and rewrapped_columns == columns:\n if isinstance(result, CompareDict):\n key_names = (keys,) if isinstance(keys, str) else keys\n result.key_names = key_names\n return result # <- EXIT!\n\n try:\n item_gen = iter(result.items())\n except AttributeError:\n item_gen = [(self._missing, result)]\n\n if rewrapped_keys != keys:\n def rebuild_keys(k, missing):\n if isinstance(keys, str):\n return k\n key_dict = dict(zip(rewrapped_keys, k))\n return tuple(key_dict.get(c, missing) for c in keys)\n missing_key = self._missing\n item_gen = ((rebuild_keys(k, missing_key), v) for k, v in item_gen)\n\n if rewrapped_columns != columns:\n def rebuild_values(v, missing):\n if isinstance(columns, str):\n return v\n if not nonstringiter(v):\n v = (v,)\n value_dict = dict(zip(rewrapped_columns, v))\n return tuple(value_dict.get(v, missing) for v in columns)\n item_gen = ((k, rebuild_values(v, missing_col)) for k, v in item_gen)\n\n return CompareDict(item_gen, key_names=keys)"
] | [
"0.5777237",
"0.5721886",
"0.5531455",
"0.548444",
"0.54103255",
"0.54072803",
"0.5384197",
"0.53711194",
"0.5370351",
"0.53101146",
"0.5296817",
"0.52691966",
"0.5252932",
"0.5242916",
"0.5215903",
"0.52110225",
"0.5207979",
"0.5202271",
"0.5188836",
"0.5164202",
"0.51598597",
"0.5157279",
"0.5147622",
"0.51181424",
"0.510944",
"0.5092469",
"0.50793564",
"0.50649875",
"0.5019659",
"0.5011922"
] | 0.7259984 | 0 |
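The row above pairs the `_add_matches` docstring with code that builds a `match_dict` per record and hands it to `self.key_matcher.add(...)`. A minimal, self-contained sketch of how such a matcher interface could fit together follows; the `KeyMatcher` class and its `find` method are assumptions for illustration only, since the dataset row shows just the calling side.

class KeyMatcher:
    """Toy matcher that indexes objects by the values of a fixed set of tracked keys."""

    def __init__(self, tracked_keys):
        self._tracked = list(tracked_keys)   # keys extracted from every record
        self._entries = []                   # (match_dict, obj) pairs

    def keys(self):
        return self._tracked

    def add(self, obj, match_dict):
        self._entries.append((match_dict, obj))

    def find(self, **criteria):
        # every stored object whose match_dict agrees with the given criteria
        return [obj for match_dict, obj in self._entries
                if all(match_dict.get(k) == v for k, v in criteria.items())]


records = [{"id": 1, "host": "a"}, {"id": 2, "host": "b"}]
matcher = KeyMatcher(tracked_keys=["host"])
for record in records:                                   # same loop shape as _add_matches
    match_dict = {k: record.get(k) for k in matcher.keys()}
    matcher.add(obj=record, match_dict=match_dict)
print(matcher.find(host="b"))                            # [{'id': 2, 'host': 'b'}]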
Check if the origin_imgs are flipped correctly. | def _check_flip(origin_imgs, result_imgs):
h, w, c = origin_imgs.shape
for i in range(h):
for j in range(w):
for k in range(c):
if result_imgs[i, j, k] != origin_imgs[i, w - 1 - j, k]:
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_flip(origin_imgs, result_imgs, flip_type):\n n, _, _, _ = np.shape(origin_imgs)\n if flip_type == 'horizontal':\n for i in range(n):\n if np.any(result_imgs[i] != np.fliplr(origin_imgs[i])):\n return False\n else:\n # yapf: disable\n for i in range(n):\n if np.any(result_imgs[i] != np.transpose(np.fliplr(np.transpose(origin_imgs[i], (1, 0, 2))), (1, 0, 2))): # noqa:E501\n return False\n # yapf: enable\n return True",
"def reversible(self) -> bool:\n xy_row = np.column_stack(\n (\n np.linspace(\n -self.imgsz[0] / (2 * self.f[0]),\n self.imgsz[0] / (2 * self.f[0]),\n int(self.imgsz[0]),\n ),\n np.zeros(int(self.imgsz[0])),\n )\n )\n dxy = self._distort(xy_row)\n continuous_row = np.all(dxy[1:, 0] >= dxy[:-1, 0])\n xy_col = np.column_stack(\n (\n np.zeros(int(self.imgsz[1])),\n np.linspace(\n -self.imgsz[1] / (2 * self.f[1]),\n self.imgsz[1] / (2 * self.f[1]),\n int(self.imgsz[1]),\n ),\n )\n )\n dxy = self._distort(xy_col)\n continuous_col = np.all(dxy[1:, 1] >= dxy[:-1, 1])\n return continuous_row and continuous_col",
"def test_flip_vertical() -> None:\n original = create_image(3, 2)\n set_color(original, 0, 0, create_color(0, 0, 0))\n set_color(original, 1, 0, create_color(90, 90, 90))\n set_color(original, 2, 0, create_color(255, 255, 255))\n set_color(original, 0, 1, create_color(10, 10, 10))\n set_color(original, 1, 1, create_color(0, 0, 0))\n set_color(original, 2, 1, create_color(90, 90, 90))\n \n expected = create_image(3, 2)\n set_color(expected, 0, 0, create_color(10, 10, 10))\n set_color(expected, 1, 0, create_color(0, 0, 0))\n set_color(expected, 2, 0, create_color(90, 90, 90))\n set_color(expected, 0, 1, create_color(0, 0, 0))\n set_color(expected, 1, 1, create_color(90, 90, 90))\n set_color(expected, 2, 1, create_color(255, 255, 255))\n \n flipped_vertical = flip_vertical(original)\n \n for x, y, col in flipped_vertical: # tests each colour of each pixel of the filtered sample image and compares it to the expected image\n check_equal('Checking pixel @(' + str(x) + ', ' + str(y) + ')', col, get_color(expected, x, y))",
"def verify_legal_rotation(self, direction):\n test_figure = None\n if direction == \"CW\":\n test_figure = self.get_block_positions(self.active_piece.get_cw_rotation())\n elif direction == \"CCW\":\n test_figure = self.get_block_positions(self.active_piece.get_ccw_rotation())\n\n for b_x, b_y in test_figure:\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True",
"def drone_has_flipped(self, current_orientation):\n has_flipped = True\n\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n\n rospy.logwarn(\"#### HAS FLIPPED? ########\")\n rospy.logwarn(\"RPY current_orientation\"+str(current_orientation))\n rospy.logwarn(\"max_roll\"+str(self.max_roll) +\n \",min_roll=\"+str(-1*self.max_roll))\n rospy.logwarn(\"max_pitch\"+str(self.max_pitch) +\n \",min_pitch=\"+str(-1*self.max_pitch))\n rospy.logwarn(\"############\")\n\n if current_orientation.x > -1*self.max_roll and current_orientation.x <= self.max_roll:\n if current_orientation.y > -1*self.max_pitch and current_orientation.y <= self.max_pitch:\n has_flipped = False\n\n return has_flipped",
"def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res",
"def image_comparison(self):\n for result in self.cards:\n if result.image_status:\n return True\n return False",
"def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):\n of_decoded_images = _of_image_decode(images)\n pil_images = [Image.open(image) for image in images]\n # convert image to BGR\n pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]\n\n for of_decoded_image, pil_decoded_image in zip(\n of_decoded_images, pil_decoded_images\n ):\n of_decoded_image = of_decoded_image.squeeze()\n test_case.assertTrue(len(of_decoded_image.shape) == 3)\n test_case.assertTrue(len(pil_decoded_image.shape) == 3)\n\n diff = of_decoded_image - pil_decoded_image\n diff_index = np.where(diff != 0)\n diff_abs_values = diff[diff_index]\n\n if print_debug_info:\n print(\"of_decoded_image:\\n\", of_decoded_image, of_decoded_image.shape)\n print(\"pil_decoded_image:\\n\", pil_decoded_image, pil_decoded_image.shape)\n print(\"diff_index:\\n\", diff_index)\n print(\"diff_abs_values:\\n\", diff_abs_values)\n print(\n \"of_decoded_image diff:\\n\",\n of_decoded_image[diff_index[0], diff_index[1]],\n )\n print(\n \"pil_decoded_image diff:\\n\",\n pil_decoded_image[diff_index[0], diff_index[1]],\n )\n\n # only green channel has difference of 1\n test_case.assertTrue(np.all(diff_index[-1] == 1))\n test_case.assertTrue(np.all(diff_abs_values == 1))",
"def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)",
"def checkForced(self, source, forced):\n self.assertEqual(source.get(\"flux.naive\"),\n self.image.get(self.x, self.y) if forced else self.image.get(self.xcen, self.ycen))",
"def _check_same_fov(*args, **kwargs):\n raise_error = kwargs.pop(\"raise_error\", False)\n for i, arg in enumerate(args):\n kwargs[f\"img_#{i}\"] = arg\n errors = []\n for (a_name, a_img), (b_name, b_img) in itertools.combinations(\n kwargs.items(), 2\n ):\n if not a_img.shape[:3] == b_img.shape[:3]:\n errors.append((a_name, b_name, \"shape\"))\n if not np.allclose(a_img.affine, b_img.affine):\n errors.append((a_name, b_name, \"affine\"))\n if len(errors) > 0 and raise_error:\n raise ValueError(\n \"Following field of view errors were detected:\\n\"\n + \"\\n\".join(\n [\n f\"- {e[0]} and {e[1]} do not have the same {e[2]}\"\n for e in errors\n ]\n )\n )\n return len(errors) == 0",
"def readout_flipped(self, iamp):\n flipped = ct.c_int()\n self.lib.IsReadoutFlippedByAmplifier(ct.c_int(iamp),\n ct.pointer(flipped))\n return bool(flipped.value)",
"def ff_correct_image(image):\n pass",
"def ff_correct_image(image):\n pass",
"def invertible(self):\n a = self._data\n return a.shape[0] == a.shape[1] and np.linalg.matrix_rank(a) == a.shape[0]",
"def __check_type__(im, ft_axes, orig, name, real_axis=0, shift_axes=[]):\n if type(orig) == image.image:\n im = im.view(image.image) # note: view casting -> this is not the viewer!\n if type(orig.name) is str:\n im.name = name + ' of ' + orig.name\n im.info = orig.info\n pxs = []\n\n for a in ft_axes:\n if a not in orig.spectral_axes:\n im.spectral_axes += [a]\n im.shift_axes = shift_axes\n if type(orig.unit) is str:\n im.unit = ''\n for i in range(im.ndim):\n if i in ft_axes:\n if name == 'IRFT' and real_axis == i:\n pxs += [1 / (orig.pixelsize[i] * 2 * (orig.shape[i] - 1))]\n else:\n pxs += [1 / (orig.pixelsize[i] * orig.shape[i])]\n if type(orig.unit) is str:\n im.unit += orig.unit + '^-1 '\n else:\n try: # TODO: FIX THIS!!!\n pxs += [orig.pixelsize[i]]\n except:\n print('Error in setting pixel size')\n if type(orig.unit) is str:\n im.unit += orig.unit + ' '\n im.pixelsize = pxs\n return (im)\n else:\n return (im)\n\n # ifft shift",
"def __compareImage(self, file1, file2):\n # arg=self.__validateString(str_arg)\n # file1, file2=arg.split(' ', 1)\n try:\n img1 = Image.open(file1)\n img2 = Image.open(file2)\n if img1.size != img2.size:\n return False\n by1 = img1.tobytes()\n by2 = img2.tobytes()\n # format r,g,b,255,r,g,b,255, 3 bytes = 1 point, 255=separator, total 4 bytes\n l = len(by1) / 4\n # total points and same points\n tp = 0\n sp = 0\n for j in range(l):\n i = j * 4\n tp += 1\n if by1[i] == by2[i] and by1[i + 1] == by2[i + 1] and by1[i + 2] == by2[i + 2]:\n sp += 1\n # max to 2% diff allowed\n if tp * 0.98 > sp:\n return False\n else:\n return True\n except Exception, e:\n printLog(self.threadName + \"Exception in __compareImage: %s\" % e.message, logging.ERROR)\n traceback.print_exc()\n return False\n finally:\n img1 = None\n img2 = None",
"def _check_consistency_between_imaging_extractors(self):\n return True",
"def compare_images(img1_path, img2_path):\n img1 = Image.open(img1_path)\n img2 = Image.open(img2_path)\n try:\n diff = ImageChops.difference(img1, img2)\n except ValueError:\n return False\n return diff.getbbox() is None",
"def _check_fov(img, affine, shape):\n img = check_niimg(img)\n return img.shape[:3] == shape and np.allclose(img.affine, affine)",
"def is_orientation_ok(image,k=2,is_first=True):\n\n mid_x, mid_y = int(0.5*image.shape[1]), int(0.5*image.shape[0])\n\n # Get moment for first body half \n image_0 = np.array(image)\n image_0[:,:int(mid_x)] = 0\n image_0 = image_0[:,int(mid_x):]\n moment_0 = get_moment(image_0,k)\n\n # Get moment for second body half\n image_1 = np.array(image)\n image_1[:,int(mid_x):] = 0\n image_1 = np.fliplr(image_1)\n image_1 = image_1[:,int(mid_x):]\n moment_1 = get_moment(image_1,k)\n\n # Compute descriminant and flip flag\n discrim = (moment_0 - moment_1)/(moment_0 + moment_1)\n if discrim < 0:\n ok = False\n else:\n ok = True \n return ok, discrim",
"def check_conv_transpose(extract):\n call = extract\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d_transpose\":\n call = call.args[0]\n\n attrs = call.attrs\n if attrs.data_layout != \"NCHW\":\n return False\n\n return True",
"def check_image_invert(image_data, border_width = 30):\n \n _, avg_intensity_borders, avg_intensity_inside = \\\n _auto_threshold_borders(image_data,border_width = border_width);\n \n # if image borders are darker than the mean image, it's a surface tension\n # image:\n if(avg_intensity_inside > avg_intensity_borders):\n return False;\n # else, it's a shadowgraph image:\n else:\n return True;",
"def _check_origin(self, origin):\n try:\n cachevol_props = self.zfssa.get_volume(origin)\n except exception.VolumeNotFound:\n LOG.debug('Origin %s does not exist', origin)\n return\n\n numclones = cachevol_props['numclones']\n LOG.debug('Number of clones: %d', numclones)\n if numclones <= 1:\n # This cache vol does not have any other clone\n self.zfssa.delete_file(origin)\n else:\n cachevol_props = {'numclones': six.text_type(numclones - 1)}\n self.zfssa.set_file_props(origin, cachevol_props)",
"def _check_inverse_transform(self, Z):\n Z_round_trip = self.inverse_func(self.func(Z))\n if not np.allclose(Z_round_trip, Z, equal_nan=True):\n raise UserWarning(\n \"The provided functions are not strictly\"\n \" inverse of each other. If you are sure you\"\n \" want to proceed regardless, set\"\n \" 'check_inverse=False'.\"\n )",
"def problem1():\n\n img = load_image(\"data/a1p1.png\")\n display_image(img)\n\n save_as_npy(\"a1p1.npy\", img)\n\n img1 = load_npy(\"a1p1.npy\")\n display_image(img1)\n\n img2 = mirror_horizontal(img1)\n display_image(img2)\n\n display_images(img1, img2)",
"def assert_data_correct(self) -> bool:\n if not self.training_folder.exists():\n return False\n # 27: number of characters\n # 27*2: 27 original font characters and 27 folders with morphed version\n if len(list(self.training_folder.iterdir())) not in [27, 27 * 2]:\n return False\n # assert that each character folder has the expected number of images inside\n # expected number is repetitions + original, or just original if no morphing\n # took place\n for directory in self.training_folder.iterdir():\n img_count = len(list(directory.iterdir()))\n if img_count != self.repetitions + 1 and img_count != 1:\n return False\n return True",
"def verifyShiftFile(self):\n if self['refimage'] and fu.findFile(self['refimage']):\n return True\n else: return False",
"def _correct_images(images):\n # From the MNIST website: \"Pixels are organized row-wise. Pixel values are 0 to 255. 0 means\n # background (white), 255 means foreground (black).\"\n # The dataset does not transform the image such that 255 is black, so do that here.\n dtype = _assert_dtype(images)\n max_val = 255 if dtype == dtypes.uint8 else 1.0\n return max_val - images",
"def check_origin(self, origin):\n return True"
] | [
"0.7891626",
"0.7111249",
"0.588529",
"0.5814684",
"0.579397",
"0.5752564",
"0.5637771",
"0.5609224",
"0.5603559",
"0.5602122",
"0.55922",
"0.5581342",
"0.5511366",
"0.5511366",
"0.5508238",
"0.54916257",
"0.545488",
"0.54529166",
"0.5440112",
"0.5437185",
"0.5426509",
"0.54239786",
"0.5414612",
"0.5376516",
"0.5331621",
"0.53161705",
"0.5309192",
"0.52966225",
"0.52963984",
"0.52750075"
] | 0.8528314 | 0 |
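The `_check_flip` document above verifies a horizontal flip pixel by pixel. As a sketch (assuming NumPy is available, which the row itself does not state), the same check can be expressed with `np.fliplr`, which reverses the width axis exactly like indexing `origin_imgs[i, w - 1 - j, k]`:

import numpy as np

def check_horizontal_flip(origin_imgs, result_imgs):
    # fliplr reverses axis 1 (the width axis) of an (h, w, c) array
    return bool(np.array_equal(result_imgs, np.fliplr(origin_imgs)))

origin = np.arange(2 * 3 * 3).reshape(2, 3, 3)
print(check_horizontal_flip(origin, origin[:, ::-1, :]))   # True
print(check_horizontal_flip(origin, origin))               # False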
Runs a single cycle of the sample collection. It should read the monitored file and extract all metrics. | def run_single_cycle(self, collector=None):
self._timestamp = int(time.time())
# There are certain error conditions, such as the system not supporting
# a particular proc file type, that we will never recover from. So,
# just always early exit.
if self._failed:
return {}
filename = self._file_pattern % self._pid
if not collector:
collector = {}
if self._file is None:
try:
self._file = open(filename, "r")
except IOError as e:
# We take a simple approach. If we don't find the file or
# don't have permissions for it, then just don't collect this
# stat from now on. If the user changes the configuration file
# we will try again to read the file then.
self._failed = True
if e.errno == errno.EACCES:
self._logger.error(
"The agent does not have permission to read %s. "
"Maybe you should run it as root.",
filename,
)
elif e.errno == errno.ENOENT:
self._logger.error(
(
"The agent cannot read %s. Your system may not support that proc file "
'type or the process with pid "%s" doesn\'t exist'
),
filename,
self._pid,
)
# Ignore 'process not found' errors (likely caused because the process exited
# but re-raise the exception for all other errors
elif e.errno != errno.ESRCH:
raise e
if self._file is not None:
try:
self._file.seek(0)
return self.gather_sample(self._file, collector=collector)
except IOError as e:
# log the error if the errno isn't 'process not found'. Process not found likely means the
# process exited, so we ignore that because it's within the realm of expected behaviour
if e.errno != errno.ESRCH:
self._logger.error(
"Error gathering sample for file: '%s'\n\t%s"
% (filename, six.text_type(e))
)
# close the file. This will cause the file to be reopened next call to run_single_cycle
self.close()
return collector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\r\n self.collect_data()",
"def gather_sample(self, my_file, collector=None):\n\n pass",
"def collect_samples(self):\n self.__running = True\n with open(self.__filename, 'a') as output:\n next_sample_time = time.time()\n while self.__running:\n sensor_name = self.__sensor.get_sensor_type_name()\n sensor_id = self.__sensor.get_sensor_id()\n data = self.__sensor.retrieve_data_string() \n if DEBUG:\n print('data: \"{}\"'.format(data),\n file = sys.stderr, flush=True)\n when = datetime.datetime.now(datetime.timezone.utc).isoformat()\n result = OUTPUT_FORMAT.format(when,\n sensor_name, \n sensor_id, \n data)\n output.write(result)\n output.flush()\n \n next_sample_time = next_sample_time + self.__interval\n delay_time = next_sample_time - time.time()\n if DEBUG:\n print('delay_time = {}'.format(delay_time),\n file=sys.stderr, flush=True)\n \n if 0 < delay_time: # don't sleep if already next sample time\n time.sleep(delay_time)",
"def run(self):\n self.logger.info(f'Running {self.__class__.__name__}')\n while True:\n last_check = time.time()\n self.collect_new_events()\n while time.time() - last_check < self._check_for_new_events_interval:\n self.logger.debug('Waiting for new events collection: new collection in {}s'.format(\n self._check_for_new_events_interval - (time.time() - last_check)))\n time.sleep(1)",
"def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)",
"def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line",
"def run(self, repetitions, **kwargs):\n\t\tself.sampler.sample(repetitions, **kwargs)",
"def start(self):\n while True:\n LogService.log_info(\"aggregator\", \"Creating statistics\")\n self.create_statistics()\n LogService.log_info(\"aggregator\", \"Cleaning up\")\n self.cleanup_measurements()\n LogService.log_info(\"aggregator\", \"Sleeping for 60 minutes\")\n time.sleep(60*60)",
"def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()",
"def run(self):\n operation_manager = self._core.get_operation_manager()\n while True:\n while operation_manager.process_next():\n pass\n sleep(2)",
"def run(self):\n while True:\n try:\n sleep(influx_settings.write_freq)\n self._redis2influx()\n except InterruptExceptions as ex:\n raise ex",
"def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH,\n '--logtostderr',\n '--input_file=sample.x',\n '--options_file=options.pbtxt',\n ]\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.pbtxt', smp.options.to_pbtxt())\n args_filename = 'args.txt'\n _write_to_file(\n run_dir, args_filename, sample.args_batch_to_text(smp.args_batch)\n )\n args.append('--args_file=args.txt')\n ir_channel_names_filename = None\n if smp.ir_channel_names is not None:\n ir_channel_names_filename = 'ir_channel_names.txt'\n _write_to_file(run_dir, ir_channel_names_filename,\n sample.ir_channel_names_to_text(smp.ir_channel_names))\n args.append('--ir_channel_names_file=ir_channel_names.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files(\n 'sample.x', 'options.pbtxt', args_filename, ir_channel_names_filename\n )\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)",
"def do(self, callback_name, *args):\n logger.info(\"Monitoring on auxiliary data started\")\n value_dict = self._evaluator.evaluate(self.data_stream)\n for key in value_dict.keys():\n value_dict[key] *= self.coverage\n value_dict['coverage'] = self.coverage\n logging.info(\"coverage:{0}\".format(self.coverage))\n for key, value in value_dict.items():\n logging.info(\"{0}:{1}\".format(key,value))\n self.add_records(self.main_loop.log, value_dict.items())\n self.check_stop(value_dict)\n logger.info(\"Monitoring on auxiliary data finished\")",
"def run_sample(smp: sample.Sample,\n run_dir: Text,\n summary_file: Optional[Text] = None,\n generate_sample_ns: Optional[int] = None):\n start = time.time()\n\n _write_to_file(run_dir, 'sample.x', smp.input_text)\n _write_to_file(run_dir, 'options.json', smp.options.to_json())\n if smp.args_batch:\n _write_to_file(run_dir, 'args.txt',\n sample.args_batch_to_text(smp.args_batch))\n\n # Create a script named 'run.sh' for rerunning the sample.\n args = [\n SAMPLE_RUNNER_MAIN_PATH, '--logtostderr', '--input_file=sample.x',\n '--options_file=options.json'\n ]\n if smp.args_batch:\n args.append('--args_file=args.txt')\n args.append(run_dir)\n _write_to_file(\n run_dir,\n 'run.sh',\n f'#!/bin/sh\\n\\n{subprocess.list2cmdline(args)}\\n',\n executable=True)\n logging.vlog(1, 'Starting to run sample')\n logging.vlog(2, smp.input_text)\n runner = sample_runner.SampleRunner(run_dir)\n runner.run_from_files('sample.x', 'options.json', 'args.txt')\n timing = runner.timing\n\n timing.total_ns = int((time.time() - start) * 1e9)\n if generate_sample_ns:\n # The sample generation time, if given, is not part of the measured total\n # time, so add it in.\n timing.total_ns += generate_sample_ns\n timing.generate_sample_ns = generate_sample_ns\n\n logging.vlog(1, 'Completed running sample, elapsed: %0.2fs',\n time.time() - start)\n\n if summary_file:\n _write_ir_summaries(run_dir, timing, summary_file)",
"def run(self):\n\n # How to retrieve your input data.\n input_1_data = self.in_data['input_1']\n\n # How to retrieve your params value.\n param_1 = self.param['param_1']\n\n # How to process data.\n # Just write any number of methods you want and use them here.\n sample_out_data = self.sample_method(input_1_data, param_1)\n\n # Go to the definition of this method to see how to log.\n self.demo_log()\n\n # This is how to set output data.\n self.out_data['output_1'] = sample_out_data",
"def run(self):\n\n step = self.steps['diagnostics_files']\n step.cores = self.config.getint('make_diagnostics_files', 'cores')\n\n # run the step\n super().run()",
"def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()",
"def test_continuous_bulk_parsing(self, aggregator):\n test_data = ensure_bytes(open(NAGIOS_TEST_LOG).read())\n ITERATIONS = 10\n log_file = tempfile.NamedTemporaryFile(mode=\"a+b\")\n\n # Get the config\n config, nagios_cfg = get_config(\"log_file={}\\n\".format(log_file.name), events=True)\n\n # Set up the check\n nagios = NagiosCheck(CHECK_NAME, {}, config['instances'])\n\n for _ in range(ITERATIONS):\n log_file.write(test_data)\n log_file.flush()\n nagios.check(config['instances'][0])\n\n log_file.close()\n assert len(aggregator.events) == ITERATIONS * 503",
"def run(self):\n print(\"%s starting up\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepMax))\n value = self.cell.read(lambda counter: self.cell.data.count)\n print(\"%s is done getting %s\" % (self.getName(), str(value)))",
"def run(self):\n\n while self.source.SAMPLING:\n wx.CallAfter(self.target, self)\n sleep(0.75)\n\n self.Terminate()",
"def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector",
"def run(self):\n\t\tself.data_source.connect()\n\t\twhile self.running:\n\t\t\tself.data_source.read()",
"def start_collector(self, callback=None):\n self.log.info(\"starting-pm-collection\", device_name=self.name)\n if callback is None:\n callback = self.perform_test_omci\n\n if self.lc is None:\n self.lc = LoopingCall(callback)\n\n if self.default_freq > 0:\n self.lc.start(interval=self.default_freq / 10)",
"def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)",
"def run(self):\n self.speed_test.start()",
"def loop(self):\n pass",
"def run(self):\n print(\"%s starting up\" % self.getName())\n for count in range(self.accessCount):\n time.sleep(random.randint(1, self.sleepMax))\n value = self.cell.write(lambda counter: counter.increment())\n print(\"%s is done incrementing to %s\" % (self.getName(), str(value)))",
"def _run(self):\n while(self._loop):\n pass",
"def run(self):\n self._setupLogger()\n self.setup()\n\n self.logger.info(self.moduleName + \" starting run loop.\")\n\n while True:\n self.loop()",
"def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")"
] | [
"0.6506419",
"0.64478827",
"0.6249575",
"0.6136713",
"0.6083549",
"0.60691047",
"0.5937353",
"0.5804329",
"0.5789543",
"0.5710799",
"0.5666293",
"0.56556666",
"0.5654352",
"0.5652799",
"0.5633741",
"0.5620311",
"0.5596868",
"0.55945826",
"0.5588704",
"0.5581774",
"0.5576543",
"0.55601287",
"0.55353713",
"0.5495253",
"0.54940134",
"0.5493805",
"0.5488754",
"0.54880697",
"0.5482203",
"0.5480609"
] | 0.71581453 | 0 |
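The `run_single_cycle` document above lazily opens a `/proc/<pid>` file, seeks back to the start on every cycle, and delegates parsing to `gather_sample`. A stripped-down sketch of that seek-and-reread pattern is below; it drops the error bookkeeping and assumes a Linux-style `/proc` text file such as `/proc/self/stat`.

import time

class FileSampler:
    """Keeps one text file open and re-reads it from the start on every cycle."""

    def __init__(self, path):
        self._path = path
        self._file = None

    def run_single_cycle(self):
        if self._file is None:                 # open lazily on first use
            self._file = open(self._path, "r")
        self._file.seek(0)                     # /proc files must be re-read from offset 0
        return {"timestamp": int(time.time()), "raw": self._file.read()}

    def close(self):
        if self._file is not None:
            self._file.close()
            self._file = None

sampler = FileSampler("/proc/self/stat")       # Linux-only path; any readable text file works
print(sampler.run_single_cycle()["raw"][:40])
sampler.close()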
Returns the number of centiseconds (1/100ths secs) for the given number of jiffies (a weird timing unit used by the kernel). | def __calculate_time_cs(self, jiffies):
return int((jiffies * 100.0) / self._jiffies_per_sec) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calculate_time_ms(self, jiffies):\n\n return int((jiffies * 1000.0) / self._jiffies_per_sec)",
"def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))",
"def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3",
"def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),\n _load_time=time.time()):\n try:\n f=open(_proc_pid_stat,'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except:\n return int(100*(time.time()-_load_time))",
"def elapsed_micros(start: int, /) -> int:",
"def _probe_wait_time(self):\n r = self.probe_cycle_time / float(len(self.servers)) #self.probe_cycle_time=5\n r = max(.25, r) # Cap it at four per second\n return r",
"def get_total_cpu_clock_cycles():\n try:\n with open(LINUX_STAT_LOCATION, 'r') as f:\n cpu_entries = f.readline().split(' ')\n except IOError:\n return None\n\n cpu_cycles = 0\n for entry in cpu_entries:\n try:\n cpu_cycles += int(entry)\n except ValueError:\n pass\n return cpu_cycles",
"def _get_cpu_interval(self):\n self._polling_execute_frequency = int(self._plugin_conf[u'main'][u'polling_frequency'])\n\n if 5 <= self._polling_execute_frequency < 60:\n return cpmCPUTotalMonIntervalValue # replaces cpmCPUTotal5SecRev\n elif 60 <= self._polling_execute_frequency < 300:\n return cpmCPUTotal1minRev\n elif 300 <= self._polling_execute_frequency:\n return cpmCPUTotal5minRev\n else:\n return cpmCPUTotal1minRev",
"def ticks_per_second(self):\n return self._ticks_per_second",
"def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms",
"def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)",
"def getTime():\n\n return float(time.perf_counter()*1000)",
"def tick(self):\n prev_last_tick = self.last_tick_\n self.last_tick_ = timeit.default_timer()\n latest_tick_period = self.last_tick_ - prev_last_tick\n return latest_tick_period",
"def millis() -> int:",
"def millis():\n return int(round(time() * 1000))",
"def get_clock_divisor(self):\n return self.o.read_register(self.dev_id, CLOCK_DIVISOR)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def getTime():\n return float(time.perf_counter()*1000)",
"def _unit_sec(self):\n return self.time_base / 60.0",
"def _get_milleseconds(self):\n return int(round(time.time() * 1000))",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def _unit_ms(self):\n return (self.time_base / 1000.0) / 60.0",
"def __micros():\n return round(time.time() * 1000000)",
"def _STEPS2TIME(step):\n return step/1000."
] | [
"0.7653799",
"0.652349",
"0.6424848",
"0.6171948",
"0.6161103",
"0.6105997",
"0.5744971",
"0.56402147",
"0.5638903",
"0.5606995",
"0.5588372",
"0.5540272",
"0.5520585",
"0.5486425",
"0.54827213",
"0.5427575",
"0.5426606",
"0.5411524",
"0.54085886",
"0.54085886",
"0.54085886",
"0.54085886",
"0.54085886",
"0.54085886",
"0.53986",
"0.5370979",
"0.5370503",
"0.5370475",
"0.5364911",
"0.536195"
] | 0.7872766 | 0 |
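The `__calculate_time_cs` row above divides by `self._jiffies_per_sec`; on Linux that divisor is normally obtained from `sysconf(SC_CLK_TCK)`. A small standalone sketch (Unix/Linux only; the 100 Hz tick rate noted in the comment is the common default, not a guarantee):

import os

CLK_TCK = os.sysconf("SC_CLK_TCK")          # kernel jiffies per second, commonly 100

def jiffies_to_centiseconds(jiffies):
    # cs = jiffies * (1 s / CLK_TCK jiffies) * (100 cs / 1 s)
    return int((jiffies * 100.0) / CLK_TCK)

# With the usual 100 Hz tick the conversion is the identity mapping.
print(jiffies_to_centiseconds(250))         # 250 when CLK_TCK == 100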
Returns the number of milliseconds for the given number of jiffies (a weird timing unit used by the kernel). | def calculate_time_ms(self, jiffies):
return int((jiffies * 1000.0) / self._jiffies_per_sec) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __calculate_time_cs(self, jiffies):\n\n return int((jiffies * 100.0) / self._jiffies_per_sec)",
"def jiffies(_load_time=time.time()):\n return int(100*(time.time()-_load_time))",
"def millis(): \n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def millis(): \r\n return int(round(monotonic.monotonic() * C.MILLISECONDS))",
"def jiffies(_proc_pid_stat = '/proc/%s/stat'%(os.getpid()),\n _load_time=time.time()):\n try:\n f=open(_proc_pid_stat,'r')\n l = f.readline().split(' ')\n f.close()\n return int(l[13])\n except:\n return int(100*(time.time()-_load_time))",
"def millis() -> int:",
"def millis():\n return int(round(time() * 1000))",
"def _nsec_to_usec_round(nsec):\n return (nsec + 500) // 10 ** 3",
"def time_millis():\n\n return int(time.time() * 1000)",
"def get_time_ms():\n return int(round(time.time() * 1000))",
"def time_ms():\n return int(1000 * time.time())",
"def get_millis(seconds):\n return seconds * 10 ** 3",
"def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)",
"def millisecond():\n return int(round(time.time() * 1000))",
"def _nowms():\n return int(time.time() * 1000)",
"def elapsed_micros(start: int, /) -> int:",
"def curTimeMs():\n\treturn int((datetime.utcnow() - datetime(1970,1,1)).total_seconds() * 1000)",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def curr_time_millis():\n return 1000 * timeit.default_timer()",
"def _get_milleseconds(self):\n return int(round(time.time() * 1000))",
"def millis(self):\n return self._micros // 1000",
"def unix_time_millisecond(date):\r\n return unix_time(date, float=True) * 1e3",
"def millis(start_time):\n dt = datetime.now() - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n return ms",
"def __micros():\n return round(time.time() * 1000000)",
"def getNowMilliseconds():\n return (datetime.datetime.utcnow() - Common.epoch_).total_seconds() * 1000.0",
"def ms(self):\n # my clock uses seconds internally\n return 1000 * self.read()",
"def dt_epoch_msecs(value):\n return long(calendar.timegm(value.timetuple())) * 1000",
"def elapsed_millis(start: int, /) -> int:",
"def timestamp(millis=False):\n return int(round(time.time() * (millis and 1000 or 1)))",
"def transmission_time_us(self, num_bytes):\n bits_to_transmit = num_bytes * 8\n transmission_time_us = (bits_to_transmit / self.megabits_per_second)\n return transmission_time_us"
] | [
"0.73772615",
"0.7025039",
"0.6870207",
"0.6851476",
"0.6639381",
"0.6579558",
"0.6533067",
"0.63947713",
"0.6242466",
"0.61780435",
"0.6164305",
"0.6149434",
"0.61466753",
"0.6125577",
"0.61053866",
"0.6076043",
"0.6049589",
"0.60407495",
"0.60054356",
"0.59866726",
"0.5976186",
"0.59511286",
"0.59292465",
"0.59017617",
"0.58917755",
"0.58187175",
"0.58177817",
"0.5814596",
"0.57947546",
"0.5781713"
] | 0.82893556 | 0 |
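For the millisecond variant in the row above the same divisor applies. The sketch below is a standalone version together with the worked arithmetic for the common 100 Hz tick, where one jiffy is 10 ms; the actual tick rate is read from `sysconf` at runtime rather than assumed.

import os

CLK_TCK = os.sysconf("SC_CLK_TCK")          # jiffies per second

def jiffies_to_ms(jiffies):
    return int((jiffies * 1000.0) / CLK_TCK)

# At 100 Hz: 1 jiffy -> 10 ms, 250 jiffies -> 2500 ms.
print(jiffies_to_ms(1), jiffies_to_ms(250))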
Gathers the metrics from the stat file. | def gather_sample(self, stat_file, collector=None):
if not collector:
collector = {}
# The file format is just a single line of all the fields.
line = stat_file.readlines()[0]
# Chop off first part which is the pid and executable file. The
# executable file is terminated with a paren so just search for that.
line = line[(line.find(") ") + 2) :]
fields = line.split()
# Then the fields we want are just at fixed field positions in the
# string. Just grab them.
# See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers
# Keep in mind that we chop first 3 values away (pid, command line, state), so you need to
# subtract 3 from the field numbers from the man page (e.g. on the man page nice is number
# 19, but in our case it's 16 aka 19 - 3)
process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(
int(fields[19])
)
collector.update(
{
Metric("app.cpu", "user"): self.__calculate_time_cs(int(fields[11])),
Metric("app.cpu", "system"): self.__calculate_time_cs(int(fields[12])),
Metric("app.uptime", None): process_uptime,
Metric("app.nice", None): float(fields[16]),
Metric("app.threads", None): int(fields[17]),
Metric("app.mem.majflt", None): int(fields[9]),
Metric("app.io.wait", None): int(fields[39])
if len(fields) >= 39
else 0,
}
)
return collector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector",
"def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector",
"def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())",
"def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # We just look for the different \"inuse\" lines and output their\n # socket type along with the count.\n m = re.search(r\"(\\w+): inuse (\\d+)\", line)\n if m is not None:\n collector.update(\n {\n Metric(\"app.net.sockets_in_use\", m.group(1).lower()): int(\n m.group(2)\n )\n }\n )\n return collector",
"def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )",
"def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)",
"def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def readStatFile(filePath):\n f = open(filePath, 'r')\n\n allStats = {}\n overviewStats = {}\n category = ''\n folder = ''\n method = ''\n\n for line in f:\n # Check if the line contains the 'cm' character and thus provides information of the specific folder\n if 'cm' in line:\n words = line.split()\n\n for word in words:\n if '/' in word:\n # All processed folder has either a /MoG or /SubSENSE folder. Exploit this to get the filename\n category = os.path.basename(os.path.dirname(os.path.dirname(os.path.normpath(filePath))))\n folder = os.path.basename(os.path.dirname(os.path.normpath(filePath)))\n method = word\n\n # Get the raw FP, TN, etc. count\n folderNumbers = {'TP': words[4], 'FP': words[5], 'FN': words[6], 'TN': words[7],\n 'ErrorShadow': words[8]}\n overviewStats[method] = folderNumbers\n\n\n # CHeck if line is not empty, does not contain certain characters, and that the folder has been found\n if '#' not in line and 'cm' not in line and line and folder and '\\n' != line and method:\n measures = line.split()\n\n isRealMeasure = True\n\n for measure in measures:\n if not RepresentsFloat(measure):\n isRealMeasure = False\n break\n\n\n if len(measures) == 7 and isRealMeasure:\n folderStats = {'recall': measures[0], 'specificity': measures[1], 'FPR': measures[2], 'FNR': measures[3], \n 'PBC': measures[4], 'precision': measures[5], 'f-measure': measures[6]}\n allStats[method] = folderStats\n\n method = ''\n\n return allStats, overviewStats",
"def file_stat(self, file_path):",
"def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))",
"def process_stat_files(param):\n\n #get the files that are actually in the output directory\n call = ['cp', '-R']\n call.append(param['working_dir']+'results/featureCount/')\n call.append(param['working_dir']+'report/')\n _, _ = subprocess.Popen(call,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE).communicate()\n\n featurecount_file = (param['working_dir']+\n 'results/featureCount/featureCount_stats.txt')\n #extract table\n table = []\n filehandle = open(featurecount_file)\n #header\n table.append(filehandle.readlines()[0].rstrip().split('\\t'))\n table[0] = table[0][1:]\n filehandle.close()\n\n #total number of aligned reads\n tot_reads = param['bam_qc']['unique_aligned_reads']\n counter = [0] * len(param['bam_qc']['unique_aligned_reads'])\n \n filehandle = open(featurecount_file)\n for line in filehandle.readlines()[1:]:\n cur_line = line.rstrip().split('\\t')\n cur_line[0] = re.sub(r'_',' ',cur_line[0])\n if cur_line[0] not in ['Unassigned MultiMapping','Assigned']:\n counter = [ct + int(cr) for ct, cr in zip(counter, cur_line[1:])]\n perc = ([cur_line[0]]+\n MODULE_HELPER.get_percentage(cur_line[1:],\n tot_reads,\n len(cur_line)-1))\n table.append(perc)\n filehandle.close()\n assigned = [tot_reads[idx] - counter[idx] for idx in range(len(tot_reads))]\n perc = ['Assigned'] + MODULE_HELPER.get_percentage(assigned,\n tot_reads,\n len(counter))\n return table",
"def MainStats(path, filetype, NrExp, col, start, stop):\n# path= path.split('/') # here is better to google and see what is going on. Or experiment alone\n# path= \"/\".join(path[:-1]) \n dato=ExtractData_raw_files(path, filetype)\n dBase=dato.createDictBase()\n stats = Stats(dBase, NrExp, col, start, stop)\n means, stds=stats.Means_Stds()\n times = stats.time_return()\n return means , stds, times",
"def __collect_stats(self, encode, file_name):\n if encode not in self.__hash.keys():\n self.__hash[encode] = []\n self.__hash[encode].append(file_name)\n self.__files_count += 1\n with open(file_name, 'r', encoding=encode) as fr:\n for line in fr:\n self.__lines += 1\n self.__chars += len(line)",
"def read_metrics(self):\n raise NotImplementedError()",
"def report_meta_metrics(stat_path):\n collectd_stats = get_self_stats(stat_path)\n backend_stats = read_vsys_data('backend_stats', _VSYS_FRONTEND_VERSION)\n submit_meta('collectd', collectd_stats)\n submit_meta('backend', backend_stats)",
"def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))",
"def _summary(in_file):\n data = Counter()\n out_file = in_file + \"_size_stats\"\n if file_exists(out_file):\n return out_file\n with open(in_file) as in_handle:\n for line in in_handle:\n counts = int(line.strip().split(\"_x\")[1])\n line = in_handle.next()\n l = len(line.strip())\n in_handle.next()\n in_handle.next()\n data[l] += counts\n with file_transaction(out_file) as tx_out_file:\n with open(tx_out_file, 'w') as out_handle:\n for l, c in data.items():\n out_handle.write(\"%s %s\\n\" % (l, c))\n return out_file",
"def stats(self):\n pass",
"def stats(self, file, **options):\n\n options['file'] = file\n\n return self._get('stats', **options)",
"def stats(self):",
"def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do",
"def stat(**kwargs):\n print(\"output stats\")",
"def print_file_stats(self):\n\n # current epoch time, file number, filename, filesize, trans secs, status\n print(f\"TRANS_STATS_FILE: {time.time()} {self.batchvals['numfiles']} {self.filevals['filename']} {self.filevals['numbytes']} {self.filevals['end_time'] - self.filevals['start_time']} {self.filevals['status']}\")",
"def stat_file(self, path, info):\n return {}",
"def load_stats():\n assert isinstance(settings.PARS['numBases'], int)\n assert isinstance(settings.PARS['dataset'], str)\n\n stat_filename = 'stat_{}_{}.json'.format(\n settings.PARS['numBases'], settings.PARS['dataset'])\n stat_full_path = os.path.join(settings.GENERATED_DATA_DIRECTORY, stat_filename)\n\n with open(stat_full_path, 'r') as file_:\n fobj_avg = json.load(file_)\n\n fobj_avg = {int(k): v for k, v in fobj_avg.items()}\n\n return fobj_avg",
"def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()",
"def stat (self, path):\r\n pass"
] | [
"0.6764025",
"0.67190164",
"0.6688609",
"0.65135384",
"0.6446264",
"0.6374985",
"0.63547957",
"0.6333468",
"0.63260734",
"0.6261242",
"0.6237093",
"0.6232534",
"0.61950576",
"0.618388",
"0.6182165",
"0.6152383",
"0.6111384",
"0.6086403",
"0.6057407",
"0.6038634",
"0.60113156",
"0.60033584",
"0.59586376",
"0.5924182",
"0.59238905",
"0.5915536",
"0.5908617",
"0.5899573",
"0.58947885",
"0.58782625"
] | 0.7137697 | 0 |
Gathers the metrics from the sockstat file. | def gather_sample(self, stat_file, collector=None):
if not collector:
collector = {}
for line in stat_file:
# We just look for the different "inuse" lines and output their
# socket type along with the count.
m = re.search(r"(\w+): inuse (\d+)", line)
if m is not None:
collector.update(
{
Metric("app.net.sockets_in_use", m.group(1).lower()): int(
m.group(2)
)
}
)
return collector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gather_sample(self, stat_file, collector=None):\n\n # This file format is weird. Each set of stats is outputted in two\n # lines. First, a header line that list the field names. Then a\n # a value line where each value is specified in the appropriate column.\n # You have to match the column name from the header line to determine\n # what that column's value is. Also, each pair of lines is prefixed\n # with the same name to make it clear they are tied together.\n all_lines = stat_file.readlines()\n # We will create an array of all of the column names in field_names\n # and all of the corresponding values in field_values.\n field_names = []\n field_values = []\n\n # To simplify the stats, we add together the two forms of retransmit\n # I could find in the netstats. Those to fast retransmit Reno and those\n # to selective Ack.\n retransmits = 0\n found_retransmit_metric = False\n\n # Read over lines, looking at adjacent lines. If their row names match,\n # then append their column names and values to field_names\n # and field_values. This will break if the two rows are not adjacent\n # but I do not think that happens in practice. If it does, we just\n # won't report the stats.\n for i in range(0, len(all_lines) - 1):\n names_split = all_lines[i].split()\n values_split = all_lines[i + 1].split()\n # Check the row names are the same.\n if names_split[0] == values_split[0] and len(names_split) == len(\n values_split\n ):\n field_names.extend(names_split)\n field_values.extend(values_split)\n\n if not collector:\n collector = {}\n\n # Now go back and look for the actual stats we care about.\n for i in range(0, len(field_names)):\n if field_names[i] == \"InOctets\":\n collector.update({Metric(\"app.net.bytes\", \"in\"): field_values[i]})\n elif field_names[i] == \"OutOctets\":\n collector.update({Metric(\"app.net.bytes\", \"out\"): field_values[i]})\n elif field_names[i] == \"TCPRenoRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n elif field_names[i] == \"TCPSackRecovery\":\n retransmits += int(field_values[i])\n found_retransmit_metric = True\n\n # If we found both forms of retransmit, add them up.\n if found_retransmit_metric:\n collector.update({Metric(\"app.net.tcp_retransmits\", None): retransmits})\n return collector",
"def gather_sample(self, stat_file, collector=None):\n if not collector:\n collector = {}\n # The file format is just a single line of all the fields.\n line = stat_file.readlines()[0]\n # Chop off first part which is the pid and executable file. The\n # executable file is terminated with a paren so just search for that.\n line = line[(line.find(\") \") + 2) :]\n fields = line.split()\n # Then the fields we want are just at fixed field positions in the\n # string. Just grab them.\n\n # See http://man7.org/linux/man-pages/man5/proc.5.html for reference on field numbers\n # Keep in mind that we chop first 3 values away (pid, command line, state), so you need to\n # subtract 3 from the field numbers from the man page (e.g. on the man page nice is number\n # 19, but in our case it's 16 aka 19 - 3)\n process_uptime = self.__get_uptime_ms() - self.calculate_time_ms(\n int(fields[19])\n )\n\n collector.update(\n {\n Metric(\"app.cpu\", \"user\"): self.__calculate_time_cs(int(fields[11])),\n Metric(\"app.cpu\", \"system\"): self.__calculate_time_cs(int(fields[12])),\n Metric(\"app.uptime\", None): process_uptime,\n Metric(\"app.nice\", None): float(fields[16]),\n Metric(\"app.threads\", None): int(fields[17]),\n Metric(\"app.mem.majflt\", None): int(fields[9]),\n Metric(\"app.io.wait\", None): int(fields[39])\n if len(fields) >= 39\n else 0,\n }\n )\n return collector",
"def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n for line in stat_file:\n # Each line has a format of:\n # Tag: Value\n #\n # We parse out all lines looking like that and match the stats we care about.\n m = re.search(r\"^(\\w+):\\s*(\\d+)\", line)\n if m is None:\n continue\n\n field_name = m.group(1)\n int_value = int(m.group(2))\n # FDSize is not the same as the number of open file descriptors. Disable\n # for now.\n # if field_name == \"FDSize\":\n # self.print_sample(\"app.fd\", int_value)\n if field_name == \"VmSize\":\n collector.update({Metric(\"app.mem.bytes\", \"vmsize\"): int_value * 1024})\n elif field_name == \"VmPeak\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_vmsize\"): int_value * 1024}\n )\n elif field_name == \"VmRSS\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"resident\"): int_value * 1024}\n )\n elif field_name == \"VmHWM\":\n collector.update(\n {Metric(\"app.mem.bytes\", \"peak_resident\"): int_value * 1024}\n )\n return collector",
"def get_stats(self):\n\n\t\tserver_data = {}\n\n\t\tyield self.sendall(\"stats\\r\\n\")\n\n\t\twhile True:\n\t\t\tline = yield self.read_line()\n\n\t\t\tif not line or line.strip() == \"END\":\n\t\t\t\tbreak\n\n\t\t\t_stat, name, value = line.split(' ', 2)\n\t\t\tserver_data[name] = value\n\n\t\traise StopIteration(server_data)",
"def stats(self):\n stats = {\n 'lines' : '', # This will count the lines under each split\n 'status_code': self.status_code,\n 'content_type': self.mime,\n 'hop': self.hop_path[-1:],\n 'sum:content_length': self.content_length,\n 'host': self.host(),\n 'source': self.source\n }\n # Add in annotations:\n for annot in self.annotations:\n # Set a prefix based on what it is:\n prefix = ''\n if self.re_tries.match(annot):\n prefix = 'tries:'\n elif self.re_ip.match(annot):\n prefix = \"ip:\"\n # Only emit lines with annotations:\n if annot != \"-\":\n stats[\"%s%s\" % (prefix, annot)] = \"\"\n return stats",
"def read_lnet_stats(f):\n ret = {'send_count': 0, 'recv_count': 0, 'send_length':0, 'recv_length': 0}\n\n pfile = os.path.normpath(f) + \"/stats\"\n with open(pfile, \"r\") as f:\n for line in f:\n chopped = line.split()\n if chopped[3]:\n ret[\"send_count\"] = int(chopped[3])\n if chopped[4]:\n ret[\"recv_count\"] = int(chopped[4])\n if chopped[7]:\n ret[\"send_length\"] = int(chopped[7])\n\t\tif chopped[8]:\n\t\t ret[\"recv_length\"] = int(chopped[8])\t\n \n\n if ret['send_count'] == 0 and ret['recv_count'] == 0 and ret['send_length'] == 0 and ret['recv_length'] == 0 :\n return None\n\n return ret",
"def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)",
"def gather_sample(self, stat_file, collector=None):\n\n if not collector:\n collector = {}\n\n # File format is single value per line with \"fieldname:\" prefix.\n for x in stat_file:\n fields = x.split()\n if len(fields) == 0:\n continue\n if not collector:\n collector = {}\n if fields[0] == \"rchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"read\"): int(fields[1])})\n elif fields[0] == \"syscr:\":\n collector.update({Metric(\"app.disk.requests\", \"read\"): int(fields[1])})\n elif fields[0] == \"wchar:\":\n collector.update({Metric(\"app.disk.bytes\", \"write\"): int(fields[1])})\n elif fields[0] == \"syscw:\":\n collector.update({Metric(\"app.disk.requests\", \"write\"): int(fields[1])})\n return collector",
"def _get_openvpn_stats(path=\"/var/run/openvpn/server-0.sock\"):\n try:\n logging.debug(\"Getting metrics from %s\", path)\n with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:\n sock.connect(path)\n sock.send(b\"load-stats\\n\")\n sock.setblocking(0)\n\n ready = select.select([sock], [], [], 5.0)\n if ready[0]:\n data = sock.recv(4096)\n if not data:\n logging.debug(\"No result?\")\n return 0\n data = data.decode('utf-8')\n logging.debug(\"Received %s\", data)\n data_match = re.search(r'nclients=(\\d+)', data)\n logging.debug(\"pattern match result %s\", data_match)\n if data_match:\n logging.debug(\"%s connections\", data_match.group(1))\n return int(data_match.group(1))\n except Exception as exc:\n logging.debug(\"Error gathering openvpn stats: %s\", exc)\n\n return 0",
"def _get_ganglia_metrics(hostname, port, file_):\n if file_:\n f = open(file_, 'r')\n return \"\".join(f.readlines())\n else:\n return netcat(hostname, port, '')",
"def get_all_stats(self) -> Dict[str, Any]:\n return self.http.get(self.config.paths.stat)",
"def _request_stats(self, datapath):\n self.logger.debug('send stats request: %016x', datapath.id)\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPPortDescStatsRequest(datapath, 0)\n datapath.send_msg(req)\n\n req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n datapath.send_msg(req)\n\n req = parser.OFPFlowStatsRequest(datapath)\n datapath.send_msg(req)",
"def sstat(self):\n coh = self.cohorts[0]\n nsample = count_lines(wtccc2_sample_file(coh, opts.platform)) - 2 \n nfac = count_lines(opts.factor_file)\n if nsample != nfac:\n raise Exception('Number of individuals in sample file (%d) does not match number if factor file (%d)' % (\n (nsample, nfac)))\n for chrom in opts.chroms:\n system('gunzip -c %s | sstat -n %d -p -f %s > %s-%02d.sstat' % (\n gen_gz_file(coh, chrom, opts.platform), nsample, opts.factor_file, coh, chrom),\n verbose=True)",
"def readInServers(self):\n # we'll be using the global server tracker file\n global server_tracker_file\n # first, grab a list of all files in the current working directory\n current_dir = os.listdir('.')\n # verify that our server tracker file exists here\n if server_tracker_file not in current_dir:\n # if there's nothing to read in, simply return\n return\n \n # read in the csv\n with open(server_tracker_file, 'rb') as infile:\n # initialize the reader\n reader = csv.reader(infile)\n # verify that the header looks exactly as we expect\n header = reader.next()\n if header != ['Server','Ping Interval','Status']:\n # if this isn't the case, we won't try to read the file\n return\n else:\n # update our servers with the records we know about\n # while we update, we'll keep a count of how many\n # we can successfully read in\n server_count = 0\n for record in reader:\n # pull out the server name and ping interval\n server = record[0]\n try:\n interval = int(record[1])\n except ValueError:\n continue\n # ping the server to determine whether it is online\n # or offline\n status = sendPing(server)\n if status == 'Online':\n # allocate to online\n self.online_servers[server] = [0, interval]\n else:\n # allocate to offline\n self.offline_servers[server] = [0, interval]\n # udpate our count\n server_count += 1\n # repeat for every record from our pseudo memory dump file\n # report and return\n print 'Read in {0} known servers'.format(server_count)\n \n # file read complete\n return",
"def setup_metrics_file(self):\n\n with open(self.metrics_path, \"w+\") as f_metrics:\n\n f_metrics.write(get_metrics_file_form())",
"def get_stats(self): \n return dict(l.split('\\t', 1) \\\n for l in wait(self.proto.stat()).splitlines() if l)",
"def update_stat_file(self):\n logfile = \"../data/{}_stat.json\".format(self.ID)\n statobj = {\n 'hp': self.hp,\n 'max_hp': MAX_TANK_HP,\n 'ammo': self.ammo,\n 'score': self.score,\n 'age': self.age,\n 'alive': not self.is_dead(),\n 'color': TANK_COLORS[self.color],\n }\n if USE_SIMULATOR:\n js.globals.handle_stat(self.ID, json.dumps(statobj))\n else:\n with open(logfile, 'w') as f:\n f.write(json.dumps(statobj))",
"def emit_metrics(self):\n parse_time = time.perf_counter() - self._parsing_start_time\n Stats.gauge(\"dag_processing.total_parse_time\", parse_time)\n Stats.gauge(\"dagbag_size\", sum(stat.num_dags for stat in self._file_stats.values()))\n Stats.gauge(\n \"dag_processing.import_errors\", sum(stat.import_errors for stat in self._file_stats.values())\n )",
"def extract_tstat_data(pcap_filepath):\n connections = {}\n conn_id = 0\n print('We are here')\n with co.cd(os.path.basename(pcap_filepath[:-5])):\n with co.cd(os.listdir('.')[0]):\n print(connections)\n # Complete TCP connections\n connections, conn_id = extract_tstat_data_tcp_complete('log_tcp_complete', connections, conn_id)\n # Non complete TCP connections (less info, but still interesting data)\n connections, conn_id = extract_tstat_data_tcp_nocomplete('log_tcp_nocomplete', connections, conn_id)\n\n return connections",
"def network_io_counters():\r\n f = open(\"/proc/net/dev\", \"r\")\r\n try:\r\n lines = f.readlines()\r\n finally:\r\n f.close()\r\n\r\n retdict = dict()\r\n for line in lines[2:]:\r\n colon = line.find(':')\r\n assert colon > 0, line\r\n name = line[:colon].strip()\r\n fields = line[colon + 1:].strip().split()\r\n bytes_recv = int(fields[0])\r\n packets_recv = int(fields[1])\r\n errin = int(fields[2])\r\n dropin = int(fields[2])\r\n bytes_sent = int(fields[8])\r\n packets_sent = int(fields[9])\r\n errout = int(fields[10])\r\n dropout = int(fields[11])\r\n retdict[name] = nt_net_iostat(bytes_sent, bytes_recv, packets_sent, packets_recv,\r\n errin, errout, dropin, dropout)\r\n return retdict",
"def get_host_stats(self, refresh=False):",
"def get_cache_stats():\n hostnames = get_memcached_hosts()\n\n if not hostnames:\n return None\n\n all_stats = []\n\n for hostname in hostnames:\n try:\n host, port = hostname.split(':')\n except ValueError:\n # Assume this is a hostname without a port.\n socket_af = socket.AF_INET\n host = hostname\n port = 11211\n\n if host == 'unix':\n socket_af = socket.AF_UNIX\n connect_param = port\n else:\n socket_af = socket.AF_INET\n connect_param = (host, int(port))\n\n s = socket.socket(socket_af, socket.SOCK_STREAM)\n\n try:\n s.connect(connect_param)\n except socket.error:\n logger.error('Unable to connect to \"%s\"' % hostname)\n s.close()\n continue\n\n s.send(b'stats\\r\\n')\n data = s.recv(2048).decode('ascii')\n s.close()\n\n stats = {}\n\n for line in data.splitlines():\n info = line.split(' ')\n\n if info[0] == 'STAT' and len(info) == 3:\n try:\n value = int(info[2])\n except ValueError:\n value = info[2]\n\n stats[info[1]] = value\n\n if stats['cmd_get'] == 0:\n stats['hit_rate'] = 0\n stats['miss_rate'] = 0\n else:\n stats['hit_rate'] = 100 * stats['get_hits'] / stats['cmd_get']\n stats['miss_rate'] = 100 * stats['get_misses'] / stats['cmd_get']\n\n all_stats.append((hostname, stats))\n\n return all_stats",
"def stats(filename):\n from .utils import stats as print_stats\n click.echo('Starting to gather statistics on file {}'.format(filename))\n print_stats(filename)\n click.echo('Statistics printing finished')",
"def meter_stats():\n current_time = time.time()\n r = requests.get('http://localhost:8080/stats/flow/1')\n r.raise_for_status()\n data = r.json()\n bytes_tx = 0\n for stat in data['1']:\n if stat['match'].get('dl_src') == '00:00:00:00:00:01':\n bytes_tx += stat['byte_count']\n global LAST_TIME\n global LAST_BYTES_TX\n time_diff = current_time - LAST_TIME\n byte_diff = bytes_tx - LAST_BYTES_TX\n LAST_TIME = current_time\n LAST_BYTES_TX = bytes_tx\n transfer_rate = byte_diff / time_diff / 1024\n # We need to accomodate the dropping of our rule with the hard timeout\n return jsonify({'transfer_rate': transfer_rate})",
"def get_stats(self):\n return utils.csv_to_dict(wait(self.proto.stat()))",
"def get_file_stat(host, fqpath):\n statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'\n command = \"stat -c '%s' %s\" % (statformat, fqpath)\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n stat_data = {}\n stat_string = rout.strip()\n (filetype, filename, inode,\n access, size, links,\n uid, gid, username, groupname) = stat_string.split(\":\")\n\n stat_data['filetype'] = filetype\n stat_data['filename'] = filename\n stat_data[\"inode\"] = inode\n stat_data[\"access\"] = access\n stat_data[\"size\"] = size\n stat_data[\"links\"] = links\n stat_data[\"username\"] = username\n stat_data[\"groupname\"] = groupname\n stat_data[\"uid\"] = uid\n stat_data[\"gid\"] = gid\n\n return stat_data\n\n g.log.error(\"Could not stat file %s: %s\" % (fqpath, rerr))\n return None",
"def possible_metrics(filename):\n\n metrics = {}\n\n raw_data = parse_config(filename)\n\n for graph in raw_data:\n for metric in graph['metrics']:\n metrics.update({metric['label']: [(metric['x_stream'], metric['y_stream'], metric['z_stream']), metric['func']]})\n\n return metrics",
"def extract_tstat_data_tcp_complete(filename, connections, conn_id):\n log_file = open(filename)\n data = log_file.readlines()\n for line in data:\n # Case 1: line start with #; skip it\n if not line.startswith(\"#\"):\n # Case 2: extract info from the line\n info = line.split()\n conn_id += 1\n connection = TCPConnection(conn_id)\n connection.flow.attr[co.TCP_COMPLETE] = True\n connection.flow.attr[co.SADDR] = co.long_ipv6_address(info[0])\n connection.flow.attr[co.DADDR] = co.long_ipv6_address(info[14])\n connection.flow.attr[co.SPORT] = info[1]\n connection.flow.attr[co.DPORT] = info[15]\n connection.flow.detect_ipv4()\n connection.flow.indicates_wifi_or_cell()\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.START] = timedelta(seconds=float(info[28])/1000)\n connection.flow.attr[co.DURATION] = float(info[30]) / 1000.0\n connection.flow.attr[co.C2S][co.PACKS] = int(info[2])\n connection.flow.attr[co.S2C][co.PACKS] = int(info[16])\n # Note that this count is about unique data bytes (sent in the payload)\n connection.flow.attr[co.C2S][co.BYTES] = int(info[6])\n connection.flow.attr[co.S2C][co.BYTES] = int(info[20])\n # This is about actual data bytes (sent in the payload, including retransmissions)\n connection.flow.attr[co.C2S][co.BYTES_DATA] = int(info[8])\n connection.flow.attr[co.S2C][co.BYTES_DATA] = int(info[22])\n\n connection.flow.attr[co.C2S][co.PACKS_RETRANS] = int(info[9])\n connection.flow.attr[co.S2C][co.PACKS_RETRANS] = int(info[23])\n connection.flow.attr[co.C2S][co.BYTES_RETRANS] = int(info[10])\n connection.flow.attr[co.S2C][co.BYTES_RETRANS] = int(info[24])\n\n connection.flow.attr[co.C2S][co.PACKS_OOO] = int(info[11])\n connection.flow.attr[co.S2C][co.PACKS_OOO] = int(info[25])\n\n connection.flow.attr[co.C2S][co.NB_SYN] = int(info[12])\n connection.flow.attr[co.S2C][co.NB_SYN] = int(info[26])\n connection.flow.attr[co.C2S][co.NB_FIN] = int(info[13])\n connection.flow.attr[co.S2C][co.NB_FIN] = int(info[27])\n connection.flow.attr[co.C2S][co.NB_RST] = int(info[3])\n connection.flow.attr[co.S2C][co.NB_RST] = int(info[17])\n connection.flow.attr[co.C2S][co.NB_ACK] = int(info[4])\n connection.flow.attr[co.S2C][co.NB_ACK] = int(info[18])\n\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.C2S][co.TIME_FIRST_PAYLD] = float(info[31]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_PAYLD] = float(info[32]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD] = float(info[33]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD] = float(info[34]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_FIRST_ACK] = float(info[35]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_ACK] = float(info[36]) / 1000.0\n\n connection.flow.attr[co.C2S][co.RTT_SAMPLES] = int(info[48])\n connection.flow.attr[co.S2C][co.RTT_SAMPLES] = int(info[55])\n connection.flow.attr[co.C2S][co.RTT_MIN] = float(info[45])\n connection.flow.attr[co.S2C][co.RTT_MIN] = float(info[52])\n connection.flow.attr[co.C2S][co.RTT_MAX] = float(info[46])\n connection.flow.attr[co.S2C][co.RTT_MAX] = float(info[53])\n connection.flow.attr[co.C2S][co.RTT_AVG] = float(info[44])\n connection.flow.attr[co.S2C][co.RTT_AVG] = float(info[51])\n connection.flow.attr[co.C2S][co.RTT_STDEV] = float(info[47])\n connection.flow.attr[co.S2C][co.RTT_STDEV] = float(info[54])\n connection.flow.attr[co.C2S][co.TTL_MIN] = float(info[49])\n connection.flow.attr[co.S2C][co.TTL_MIN] = float(info[56])\n 
connection.flow.attr[co.C2S][co.TTL_MAX] = float(info[50])\n connection.flow.attr[co.S2C][co.TTL_MAX] = float(info[57])\n\n connection.flow.attr[co.C2S][co.SS_MIN] = int(info[71])\n connection.flow.attr[co.S2C][co.SS_MIN] = int(info[94])\n connection.flow.attr[co.C2S][co.SS_MAX] = int(info[70])\n connection.flow.attr[co.S2C][co.SS_MAX] = int(info[93])\n\n connection.flow.attr[co.C2S][co.CWIN_MIN] = int(info[76])\n connection.flow.attr[co.S2C][co.CWIN_MIN] = int(info[99])\n connection.flow.attr[co.C2S][co.CWIN_MAX] = int(info[75])\n connection.flow.attr[co.S2C][co.CWIN_MAX] = int(info[98])\n\n connection.flow.attr[co.C2S][co.NB_RTX_RTO] = int(info[78])\n connection.flow.attr[co.S2C][co.NB_RTX_RTO] = int(info[101])\n connection.flow.attr[co.C2S][co.NB_RTX_FR] = int(info[79])\n connection.flow.attr[co.S2C][co.NB_RTX_FR] = int(info[102])\n connection.flow.attr[co.C2S][co.NB_REORDERING] = int(info[80])\n connection.flow.attr[co.S2C][co.NB_REORDERING] = int(info[103])\n connection.flow.attr[co.C2S][co.NB_NET_DUP] = int(info[81])\n connection.flow.attr[co.S2C][co.NB_NET_DUP] = int(info[104])\n connection.flow.attr[co.C2S][co.NB_UNKNOWN] = int(info[82])\n connection.flow.attr[co.S2C][co.NB_UNKNOWN] = int(info[105])\n connection.flow.attr[co.C2S][co.NB_FLOW_CONTROL] = int(info[83])\n connection.flow.attr[co.S2C][co.NB_FLOW_CONTROL] = int(info[106])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_RTO] = int(info[84])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_RTO] = int(info[107])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_FR] = int(info[85])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_FR] = int(info[108])\n\n connection.attr[co.C2S][co.BYTES] = {}\n connection.attr[co.S2C][co.BYTES] = {}\n\n connection.flow.attr[co.C2S][co.TIMESTAMP_RETRANS] = []\n connection.flow.attr[co.S2C][co.TIMESTAMP_RETRANS] = []\n\n connection.flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n\n connections[conn_id] = connection\n\n log_file.close()\n return connections, conn_id",
"def _show_general_stats(self):\n\n stat = YuStats()\n template_filename = self._get_config_template('stats')\n text = read_template(\n template_filename,\n title=SERVER_NAME,\n header=SERVER_NAME,\n number_of_links=format_none(stat.links_all),\n number_of_redirects=format_none(stat.redirect_all),\n number_of_redirects_today=format_none(stat.redirect_today),\n number_of_redirects_this_week=format_none(stat.redirect_this_week),\n number_of_redirects_this_month=format_none(stat.redirect_this_month),\n number_of_redirects_this_year=format_none(stat.redirect_this_year),\n number_of_url_today=format_none(stat.links_today),\n number_of_url_this_week=format_none(stat.links_this_week),\n number_of_url_this_month=format_none(stat.links_this_month),\n number_of_url_this_year=format_none(stat.links_this_year),\n date_of_first_redirect=format_none(stat.date_of_first_redirect),\n )\n if text:\n self._send_head(text, 200)\n if not self._header_only:\n try:\n self.wfile.write(text)\n except socket.error:\n # clients like to stop reading after they got a 404\n pass\n else:\n self._send_internal_server_error()",
"def start_monitor_loop(self):\n read_file = read_config_file.ConfigFileReader()\n\n communication_time = read_file.get_send_communication_time()\n metrics_array = read_file.get_metrics()\n\n self.add_metrics_to_monitor_object(communication_time, metrics_array)"
] | [
"0.6393384",
"0.6306739",
"0.60562605",
"0.6037875",
"0.597491",
"0.59416795",
"0.5933875",
"0.5916901",
"0.57845867",
"0.577276",
"0.5749845",
"0.571024",
"0.5700701",
"0.56980515",
"0.5637241",
"0.5629849",
"0.56119055",
"0.55269897",
"0.5518267",
"0.55144465",
"0.5510193",
"0.5505477",
"0.54930115",
"0.5482835",
"0.5477913",
"0.54672444",
"0.5464341",
"0.54504126",
"0.543767",
"0.542593"
] | 0.71067053 | 0 |
Collects the metrics from the gathers | def collect(self):
collector = {}
for gather in self.gathers:
try:
stats = gather.run_single_cycle(collector=collector)
if stats:
collector.update(stats)
except Exception as ex:
self._logger.exception(
"Exception while collecting metrics for PID: %s of type: %s. Details: %s",
self.pid,
type(gather),
repr(ex),
)
return collector | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n for metric in metric_rq():\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_rq_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def collect(self): # pylint: disable=no-self-use\n start = time.time()\n\n if \"jobs\" in PLUGIN_SETTINGS and PLUGIN_SETTINGS[\"jobs\"]:\n for metric in metric_jobs():\n yield metric\n\n if \"models\" in PLUGIN_SETTINGS:\n for metric in metric_models(PLUGIN_SETTINGS[\"models\"]):\n yield metric\n\n # --------------------------------------------------------------\n # Extras Function defined in configuration.py or the Regristry\n # # --------------------------------------------------------------\n if \"extras\" in PLUGIN_SETTINGS:\n for metric in collect_extras_metric(PLUGIN_SETTINGS[\"extras\"]):\n yield metric\n\n for metric in collect_extras_metric(__REGISTRY__):\n yield metric\n\n gauge = GaugeMetricFamily(\n \"nautobot_app_metrics_processing_ms\", \"Time in ms to generate the app metrics endpoint\"\n )\n duration = time.time() - start\n gauge.add_metric([], format(duration * 1000, \".5f\"))\n yield gauge",
"def compute_metrics(self):\n pass",
"def collect_stats(self, cursor):\n metrics = self.config.get('metrics', DEFAULT_METRICS)\n if isinstance(metrics, str):\n if metrics == \"all\":\n # puffer_pool_status is only for 5.5, so we ignore that by default\n metrics = CATEGORIES.keys()\n metrics.remove('buffer_pool_stats')\n else:\n # support comma-separated list\n metrics = re.split(\"\\s*,\\s*\", metrics)\n\n self.logger.debug(\"metrics to collect: %s\" % \", \".join(metrics))\n for cat in metrics:\n if cat in CATEGORIES:\n self.add_category_stats(cat, cursor)\n else:\n self.logger.warning(\"%s is not a valid metric category\" % cat)\n\n if 'newrelic' in metrics:\n self.derive_newrelic_stats()",
"def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()",
"def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))",
"def metrics(self):\n self.metrics = []\n \n self.clients()\n\n if len(self.metrics) > 0:\n return self.metrics\n else:\n return []",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def collect(self) -> Metric:\n ret = self.source()\n if ret is None:\n LOGGER.warning('Statistics are not available')\n return\n gauge = GaugeMetricFamily('wemo_device_state', 'Status of Wemo device', labels=['address', 'parameter'])\n gauge.add_metric([ret.address, 'today_kwh'], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'current_power_mW'], ret.current_power,\n timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_on_time'], ret.today_on_time, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'on_for'], ret.on_for, timestamp=ret.collection_time.timestamp())\n gauge.add_metric([ret.address, 'today_standby_time'], ret.today_standby_time,\n timestamp=ret.collection_time.timestamp())\n\n yield gauge\n\n counter = CounterMetricFamily('wemo_power_usage', 'Today power consumption', labels=['address'])\n counter.add_metric([ret.address], ret.today_kwh, timestamp=ret.collection_time.timestamp())\n yield counter",
"def metrics_group():",
"def fetch(self):\n\n\n # Update Prometheus metrics with application metrics\n self.current_requests.set(get_current_requests())\n self.total_uptime.set(get_uptime())\n self.health.state(get_health())",
"def collect_stat(self):\n\n cnstat_dict, ratestat_dict = self.get_cnstat()\n self.cnstat_dict.update(cnstat_dict)\n self.ratestat_dict.update(ratestat_dict)",
"def _dispatch_metrics(self, payload):\n for item in payload:\n try:\n self._ingest.send(gauges=item['gauges'], counters=item['counters'])\n except Exception as e:\n self._logger.error(\"Exception while sending payload to ingest : {0}\".format(e))",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def compute_metrics(self, results: list) -> dict:",
"def run(self):\r\n self.collect_data()",
"def collect(self, model):\n if self.model_reporters:\n for var, reporter in self.model_reporters.items():\n self.model_vars[var].append(reporter(model))\n\n if self.agent_reporters:\n for var, reporter in self.agent_reporters.items():\n agent_records = []\n agent_records.append((model.timer.unique_id, reporter(model.timer)))\n self.agent_vars[var].append(agent_records)",
"def collect_metrics(application):\n\n try:\n subprocess.check_call(['juju', 'collect-metrics', application])\n except subprocess.CalledProcessError as e:\n raise Exception(\"Unable to collect metrics: {}\".format(e))",
"def _get_metrics_to_collect(self, instance_key, additional_metrics):\n if instance_key not in self.metrics_to_collect_by_instance:\n self.metrics_to_collect_by_instance[instance_key] = \\\n self._build_metric_list_to_collect(additional_metrics)\n return self.metrics_to_collect_by_instance[instance_key]",
"def _collect_all(self):",
"def _collect_data(self) -> None:\n self.set_websocket_data()\n self.set_stratum_data()\n self.set_cache_data()\n self.collect_peer_connection_metrics()\n self.set_tx_storage_data()",
"def collect(self):\n pass",
"def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)",
"def collectData(self):\n\n self.data.datahash = {} # dict of system data\n\n vmstat_dict = self._getvmstat()\n if vmstat_dict:\n self.data.datahash.update(vmstat_dict)\n\n uptime_dict = self._getuptime()\n if uptime_dict:\n self.data.datahash.update(uptime_dict)\n\n log.log( \"<system>system.collectData(): new system list created\", 7 )",
"def add_stats(self):\n units = self.get_unit_map()\n for metric in self.raw_metrics:\n unit, metric_type = units.get(metric, (DEFAULT_UNIT, DEFAULT_TYPE))\n if metric_type == \"counter\":\n # Unit/Second\n unit = \"/\".join((unit, \"Second\"))\n self.add_derive_value(metric, unit, self.raw_metrics[metric], rate=True)\n else:\n self.add_gauge_value(metric, unit, self.raw_metrics[metric])",
"def get_stats(self):\n # pool.map needs an arg for each function that will be run\n dmx_mean = [self.dmx.mean()] * len(self.genome_paths)\n with ProcessingPool() as pool:\n results = pool.map(genome.mp_stats, self.genome_paths, dmx_mean)\n self.stats = pd.concat(results)\n self.stats.to_csv(self.stats_path)",
"def _process(self):\n export_collect_data(self.kwargs[\"collect\"])",
"def set_metrics(self):",
"def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()"
] | [
"0.6958763",
"0.69119155",
"0.68189895",
"0.66842306",
"0.66356134",
"0.6568736",
"0.6555862",
"0.6482938",
"0.64655095",
"0.64260054",
"0.641891",
"0.63836133",
"0.63763535",
"0.62167543",
"0.62135196",
"0.62097865",
"0.6177793",
"0.61742324",
"0.61667204",
"0.61239713",
"0.6101292",
"0.6057514",
"0.6046839",
"0.6036569",
"0.60301644",
"0.6017277",
"0.60003597",
"0.5986058",
"0.5976332",
"0.5970084"
] | 0.7913655 | 0 |
Return the process of the agent. | def current_process(self):
return self._current_process | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _launchAgentProcess( self ):\n return subprocess.Popen( [ sys.executable, os.path.join( sys.path[0], 'agentProcess.py' ), str( _processPid ) ], stdin=subprocess.PIPE, stdout=subprocess.PIPE )",
"def get_process(self, pid):\n return self.processes.get(pid, None)",
"def get_my_process():\n return get_process_object(os.getpid())",
"def get_process():\n data = _get_process_detail_expanded_data()[\"process\"]\n return data",
"def getProcessManager(self): \n \n return self.procmgr",
"def mmo_what_process_am_i(self, mmo_connection):\n return mmo_connection[\"admin\"].command(\"serverStatus\")[\"process\"];",
"def proc(self):\n return self._proc",
"def get_process(self) -> ApplyResult:\n return self._process",
"def pid(self):\n return self._get_process_id()",
"def pid(self):\n return self._process.pid",
"def getPID(self):\r\n self._update('getPID')\r\n return self.supervisord.options.get_pid()",
"def get_process(self):\n\n self.log.debug('Getting application process data')\n cmd_output = admin_tasks.get_process(self.app_name)\n if cmd_output:\n self.log.info('Application process is running')\n print(\"Command output: \\n\" + cmd_output)\n else:\n self.log.info('Application process is not running')",
"def process_id(self):\n return self._process_id",
"def process_id(self):\n return self._process_id",
"def process(self):\n # type: () -> Optional[multiprocessing.Process]\n try:\n return self._process # type: ignore # pylint: disable=no-member\n except:\n return None",
"def get_worker_from_agent(agent: Agent):\n return agent.mephisto_agent.get_worker()",
"def pid(self):\n\n return getpid() if self.__process is None else self.__process.pid",
"def get_process_pid(robot_name):\n\n try:\n result = check_output(['pgrep', 'x{0}'.format(robot_name)])\n return int(result.strip())\n except:\n return None",
"def process(self) -> Union['psutil.Process', None]:\n psutil = attempt_import('psutil')\n pid = self.pid\n if pid is None:\n return None\n if not '_process' in self.__dict__ or self._process.pid != int(pid):\n try:\n self._process = psutil.Process(int(pid))\n except Exception as e:\n if self.pid_path.exists():\n self.pid_path.unlink()\n return None\n return self._process",
"def pid(self):\n if self.proc is None:\n return 0\n return self._pid()",
"def get_process(ngrok_path, config_path=None, auth_token=None, region=None):\n if ngrok_path in _current_processes:\n # Ensure the process is still running and hasn't been killed externally\n if _current_processes[ngrok_path].proc.poll() is None:\n return _current_processes[ngrok_path]\n else:\n _current_processes.pop(ngrok_path, None)\n\n return _start_process(ngrok_path, config_path, auth_token, region)",
"def agent(self):\n return self.__agent",
"def GetChromeProcess(self):\n procs = self.ListProcesses()\n session_manager_pid = self._GetSessionManagerPid(procs)\n if not session_manager_pid:\n return None\n\n # Find the chrome process that is the child of the session_manager.\n for pid, process, ppid, _ in procs:\n if ppid != session_manager_pid:\n continue\n for regex in _CHROME_PROCESS_REGEX:\n path_match = re.match(regex, process)\n if path_match is not None:\n return {'pid': pid, 'path': path_match.group(), 'args': process}\n return None",
"def get_cognitive_process(self):\n if not self.has_cognitive_process():\n raise IllegalState()\n else:\n return Grade(self._get_grade_map(self._my_map['cognitiveProcessId'])),",
"def get(self):\n if self.proc is not None:\n return self.proc.get()\n\n return None",
"def get_processor(self):\n return self._processor",
"def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None",
"def get_pid(self):\n if self.status():\n file = open(os.path.join(self.data_dir, 'postmaster.pid'))\n pid = int(file.readline())\n return pid\n else:\n return None",
"def spawn(self):\r\n self.before_spawn()\r\n pid = Subprocess.spawn(self)\r\n if pid is None:\r\n #Remove object reference to decrement the reference count on error\r\n self.fcgi_sock = None\r\n return pid",
"def get_process_name(self):\n\n return self._args.t"
] | [
"0.69227177",
"0.68076235",
"0.66409147",
"0.65917194",
"0.6577045",
"0.65301675",
"0.6481512",
"0.6463723",
"0.6425003",
"0.639765",
"0.6345163",
"0.6329305",
"0.63107604",
"0.63107604",
"0.6309831",
"0.6220328",
"0.6186098",
"0.61813504",
"0.6084275",
"0.6083225",
"0.6051368",
"0.6008365",
"0.59980464",
"0.59642017",
"0.59629494",
"0.592352",
"0.58746",
"0.58428466",
"0.5806936",
"0.58057505"
] | 0.69763553 | 0 |
Given a process id, return all children processes (recursively) | def get_child_processes(self, ppid):
all_children = []
children_to_explore = set()
for _pid in self.parent_to_children_map[ppid]:
all_children.append(_pid)
children_to_explore.add(_pid)
# get the children 'recursively'
while children_to_explore: # the invariant
child_to_explore = children_to_explore.pop()
if not self.parent_to_children_map.get(child_to_explore):
continue
unvisited = self.parent_to_children_map[child_to_explore]
for node in unvisited:
if node not in all_children:
children_to_explore.add(node)
all_children.append(node)
return list(set(all_children)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted",
"def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids",
"def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]",
"def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children",
"def get_pids(pid):\n\n pids=set([pid])\n for child in get_children(pid):\n pids.update(traverse_tree(child,pids))\n \n return list(pids)",
"def Children( cls, pid ):\n\t\tres = []\n\t\tpid = int(pid)\n\t\tfor cpid, cmd in cls.List().items():\n\t\t\tppid = int(cls.Status(cpid)[\"ppid\"])\n\t\t\tif ppid == pid:\n\t\t\t\tres.append( (cpid, None, cmd))\n\t\treturn res",
"def children_of(self, pid, all=False):\r\n self._raise_unless_has_pid(pid)\r\n if all:\r\n all_children = set()\r\n self._calculate_children(pid, all_children)\r\n return all_children\r\n else:\r\n return copy(self._pid_to_children[pid])",
"def kill_process_children(pid):\n root_process_path = \"/proc/{pid}/task/{pid}/children\".format(pid=pid)\n if not os.path.isfile(root_process_path):\n return\n with open(root_process_path) as children_list_file:\n children_list_pid = children_list_file.read().split()\n\n for child_pid in children_list_pid:\n children_proc_path = \"/proc/%s/task/%s/children\" % (\n child_pid,\n child_pid,\n )\n if not os.path.isfile(children_proc_path):\n continue\n with open(children_proc_path) as children_list_file_2:\n children_list_pid_2 = children_list_file_2.read().split()\n for _pid in children_list_pid_2:\n try:\n os.kill(int(_pid), signal.SIGTERM)\n except ProcessLookupError:\n continue\n try:\n os.kill(int(child_pid), signal.SIGTERM)\n except ProcessLookupError:\n continue",
"def getChildPIDs(self):\n\t\treturn self.pids",
"def get_children(self, go_id=None):\n rec = self.dict_go[go_id]\n set_parents = rec.get_all_children()\n return set_parents",
"def collect_children(self):\n\t\twhile self.active_children:\n\t\t\tif len(self.active_children) < self.max_children:\n\t\t\t\toptions = os.WNOHANG\n\t\t\telse:\n\t\t\t\t# If the maximum number of children are already\n\t\t\t\t# running, block while waiting for a child to exit\n\t\t\t\toptions = 0\n\t\t\ttry:\n\t\t\t\tpid, status = os.waitpid(0, options)\n\t\t\texcept os.error:\n\t\t\t\tpid = None\n\t\t\tif not pid: break\n\t\t\tself.active_children.remove(pid)",
"def get_child_elements_by_id(self, id):\n for item in self._elements:\n if item.get_parent_id() == id:\n yield item",
"def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids",
"def get_child_ids(id,conn):\n\n child_ids = ('WITH RECURSIVE children AS '\n '(SELECT subject_id '\n 'FROM cvterm_relationship '\n 'WHERE object_id = %s '\n 'UNION '\n 'SELECT cr.subject_id '\n 'FROM cvterm_relationship cr '\n 'INNER JOIN children ch ON ch.subject_id = cr.object_id) '\n 'SELECT * FROM children')\n ids = connect(child_ids,id,conn)\n list_of_ids = []\n for item in ids:\n list_of_ids.append(item[0])\n return(list_of_ids)",
"def do_select_children(self, node_id):\n try:\n _children = self.tree.children(node_id)\n except NodeIDAbsentError:\n _children = None\n\n return _children",
"async def get_child_ids(db, post_id):\n sql = \"SELECT id FROM hive_posts WHERE parent_id = :id AND is_deleted = '0'\"\n return await db.query_col(sql, id=post_id)",
"def get_child_package(id):\n\n relationships = []\n try:\n relationships = p.toolkit.get_action(\"package_relationships_list\")(\n data_dict={\"id\": id, \"rel\": \"parent_of\"}\n )\n except Exception, e:\n return {}\n\n children = []\n if relationships:\n for rel in relationships:\n try:\n access = p.toolkit.check_access(\n \"package_show\",\n context={\"user\": c.user},\n data_dict={\"id\": rel[\"object\"]},\n )\n child = p.toolkit.get_action(\"package_show\")(\n data_dict={\"id\": rel[\"object\"]}\n )\n children.append(child)\n except:\n pass\n return children",
"def get_matches_commandline_with_children(self, match_pattern):\n\n matched_pids = self.get_matches_commandline(match_pattern)\n for matched_pid in matched_pids:\n matched_pids.extend(self.get_child_processes(matched_pid))\n return list(set(matched_pids))",
"def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))",
"def traverse_tree(pid,nodes):\n\n for child in get_children(pid):\n nodes.update(traverse_tree(child,nodes))\n nodes.add(pid)\n\n return nodes",
"def kill_children(timeout=1) -> List[psutil.Process]:\n procs = child_manager.children_pop_all()\n for p in procs:\n try:\n p.terminate()\n except psutil.NoSuchProcess:\n pass\n gone, alive = psutil.wait_procs(procs, timeout=timeout)\n for p in alive:\n logger.warning(\"Cleaning up child: %d\", p.pid)\n p.kill()\n return alive",
"def get_jobs_by_process_id(self, process_id):\n\n jobs = list()\n for job in Job.objects.filter(process=process_id):\n jobs.append(job)\n return jobs",
"def get_processes():\n yield from psutil.process_iter()",
"def pslist(self) -> Generator[dict, None, None]:\n\n # Function to switch fields to represent a parent\n def _convert_to_parent_fields(process: dict) -> dict:\n output = {}\n for left, right in [\n (FieldNames.PROCESS_IMAGE, FieldNames.PARENT_PROCESS_IMAGE),\n (FieldNames.PROCESS_ID, FieldNames.PARENT_PROCESS_ID),\n (FieldNames.COMMAND_LINE, FieldNames.PARENT_COMMAND_LINE),\n (FieldNames.PROCESS_IMAGE_PATH, FieldNames.PARENT_PROCESS_IMAGE_PATH),\n ]:\n output[right] = process[left]\n\n return output\n\n # Use the pstree dict output to get a mapping from pid -> proc\n procs = self.session.plugins.pstree()._make_process_dict()\n\n parent_procs: Dict[int, dict] = {}\n\n # Add the system idle process\n parent_procs[0] = {\n FieldNames.PARENT_PROCESS_ID: 0,\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"System Idle Process\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\\\\\",\n }\n\n for proc in procs.values():\n\n parent_pid = proc.InheritedFromUniqueProcessId\n\n # Get the current processes info\n command_line = str(proc.Peb.ProcessParameters.CommandLine)\n image_path = str(proc.Peb.ProcessParameters.ImagePathName)\n\n if int(proc.pid) == 4:\n process_image = \"SYSTEM\"\n process_image_path = \"\\\\\"\n else:\n process_image, process_image_path = split_path(image_path)\n\n current_proc = {\n FieldNames.EVENT_TYPE: EventTypes.PROCESS_LAUNCHED,\n FieldNames.PROCESS_ID: int(proc.pid),\n FieldNames.COMMAND_LINE: command_line,\n FieldNames.PROCESS_IMAGE: process_image,\n FieldNames.PROCESS_IMAGE_PATH: process_image_path,\n }\n\n # Keep track of the processes.\n self.processes[int(proc.pid)] = current_proc\n\n current_as_parent = _convert_to_parent_fields(current_proc)\n parent_procs[int(proc.pid)] = current_as_parent\n\n # Parse the parent process\n if parent_pid not in parent_procs:\n\n # Do we the _EPROCESS for this process?\n if int(parent_pid) in procs:\n parent = procs[int(parent_pid)]\n parent_image_path = parent.Peb.ProcessParameters.ImagePathName\n\n parent_process_image, parent_process_image_path = split_path(\n str(parent_image_path)\n )\n\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent.pid),\n FieldNames.PARENT_COMMAND_LINE: parent.Peb.ProcessParameters.CommandLine,\n FieldNames.PARENT_PROCESS_IMAGE: parent_process_image,\n FieldNames.PARENT_PROCESS_IMAGE_PATH: parent_process_image_path,\n }\n\n # If not, make a dummy one with the PID\n else:\n parent_proc = {\n FieldNames.PARENT_PROCESS_ID: int(parent_pid),\n FieldNames.PARENT_COMMAND_LINE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE: \"\",\n FieldNames.PARENT_PROCESS_IMAGE_PATH: \"\",\n }\n\n parent_procs[int(parent_pid)] = parent_proc\n\n yield {**current_proc, **parent_procs[int(parent_pid)]}",
"def process_iter():\r\n def add(pid):\r\n proc = Process(pid)\r\n _pmap[proc.pid] = proc\r\n return proc\r\n\r\n def remove(pid):\r\n _pmap.pop(pid, None)\r\n\r\n a = set(get_pid_list())\r\n b = set(_pmap.keys())\r\n new_pids = a - b\r\n gone_pids = b - a\r\n\r\n for pid in gone_pids:\r\n remove(pid)\r\n for pid, proc in sorted(list(_pmap.items()) + \\\r\n list(dict.fromkeys(new_pids).items())):\r\n try:\r\n if proc is None: # new process\r\n yield add(pid)\r\n else:\r\n # use is_running() to check whether PID has been reused by\r\n # another process in which case yield a new Process instance\r\n if proc.is_running():\r\n yield proc\r\n else:\r\n yield add(pid)\r\n except NoSuchProcess:\r\n remove(pid)\r\n except AccessDenied:\r\n # Process creation time can't be determined hence there's\r\n # no way to tell whether the pid of the cached process\r\n # has been reused. Just return the cached version.\r\n yield proc",
"def children(parent, data):\n\n kids = []\n for pid in data:\n if data[pid][\"parentId1\"] == parent or data[pid][\"parentId2\"] == parent:\n kids.append(pid)\n\n return kids",
"def get_children(self, node_id: np.uint64) -> np.ndarray:\n children = self.read_row(node_id, \"children\", dtype=np.uint64)\n\n if children is None:\n return np.empty(0, dtype=np.uint64)\n else:\n return children",
"def find_child_containers(self, parent_id: str) -> list:\n try:\n return self.docker.containers.list(\n filters={\n 'label': f'{LABEL_PARENT_ID}={parent_id}',\n },\n )\n\n except requests.exceptions.ConnectionError:\n raise ProviderError('Docker engine unavailable')",
"def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results",
"def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids"
] | [
"0.8440308",
"0.78711176",
"0.7738845",
"0.73896194",
"0.73563373",
"0.7110651",
"0.6810038",
"0.66680056",
"0.651955",
"0.6309715",
"0.62305254",
"0.603989",
"0.59731925",
"0.5933038",
"0.5905366",
"0.5880942",
"0.5819915",
"0.57845265",
"0.5777258",
"0.5750268",
"0.574295",
"0.57289284",
"0.57132065",
"0.56682825",
"0.56626683",
"0.5651312",
"0.5644418",
"0.5639618",
"0.56385756",
"0.56256604"
] | 0.7898279 | 1 |
Returns a list of all running process ids | def get_running_processes(self):
all_processes = []
for _process in self.processes:
all_processes.append(_process["pid"])
return all_processes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pid_list():\r\n pids = [int(x) for x in os.listdir('/proc') if x.isdigit()]\r\n return pids",
"def pids(self):\n return self._pidToProcess.iterkeys()",
"def running_procs(self) -> List[int]:\n return [p.model_id for p in self.primary_scheduler.queue_nodes.run_q]",
"def getActiveProcesses():\n active = []\n\n for p in PROCESSRUNNER_PROCESSES:\n if p.is_alive():\n active.append(p)\n\n return active",
"def get_all_current_processes():\n p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n return out",
"def get_running_unison_processes(self):\n # Get PIDs\n # Note: throws exception if no instances exist\n try:\n pids = str(subprocess.check_output([\"pidof\", '/usr/bin/unison']))\n\n # Parse command output into list by removing junk chars and exploding\n # string with space delimiter\n pids = pids[2:-3].split(' ')\n\n except subprocess.CalledProcessError:\n # If error caught here, no unison instances are found running\n pids = []\n\n self.logger.debug(\n \"Found \" + str(len(pids)) + \" running instances on this system: PIDs \" +\n \", \".join(pids)\n )\n\n # Return, after converting to ints\n return list(map(int, pids))",
"def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)",
"def running_processes(self):\n return [process for process in self.processes.values()\n if process.running_on(self.address_name)]",
"def pids():\n stream = os.popen(\"ps aux | grep '[m]itm' | awk '{print $2}'\")\n return stream.read()",
"def pids(self, node):\n try:\n cmd = \"ps ax | grep -i 'redpanda\\|node' | grep -v grep | awk '{print $1}'\"\n pid_arr = [\n pid for pid in node.account.ssh_capture(\n cmd, allow_fail=True, callback=int)\n ]\n return pid_arr\n except (RemoteCommandError, ValueError):\n return []",
"def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]",
"def PIDs():\n from ctypes import windll,c_ulong,byref,sizeof\n PIDs = (c_ulong*512)()\n size_of_PIDs = c_ulong()\n windll.psapi.EnumProcesses(byref(PIDs),sizeof(PIDs),byref(size_of_PIDs))\n nPIDs = size_of_PIDs.value/sizeof(c_ulong())\n pidProcess = sorted([int(i) for i in PIDs][:nPIDs])\n return pidProcess",
"def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n ['/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append(\n (int(m.group(1)), m.group(3).rstrip(), int(m.group(2)), m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def ListProcesses(self):\n stdout, stderr = self.RunCmdOnDevice(\n [\n '/bin/ps', '--no-headers', '-A', '-o', 'pid,ppid,args:4096,state'\n ],\n quiet=True)\n assert stderr == '', stderr\n procs = []\n for l in stdout.split('\\n'):\n if l == '':\n continue\n m = re.match(r'^\\s*(\\d+)\\s+(\\d+)\\s+(.+)\\s+(.+)', l, re.DOTALL)\n assert m\n procs.append((int(m.group(1)), m.group(3).rstrip(), int(m.group(2)),\n m.group(4)))\n logging.debug(\"ListProcesses(<predicate>)->[%i processes]\" % len(procs))\n return procs",
"def pids(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/pids\" % (\n self.sessionid, self.name))\n result = self.server.json_body(resp)\n return result['pids']",
"def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids",
"def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}",
"def list_running_tasks():\n inspector = current_app.control.inspect()\n\n return inspector.active()",
"def pids(node, java_class):\n cmd = \"ps -C java -wwo pid,args | grep '%s' | awk -F' ' '{print $1}'\" % java_class\n\n return [int(pid) for pid in node.account.ssh_capture(cmd, allow_fail=True)]",
"def pids(self):\r\n return copy(self._pids)",
"def processor_ids(self):\n return self._processor_ids",
"def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids",
"def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]",
"def monitoredProcs(self):\n return self._pidToProcess.itervalues()",
"def get_session_ids(self):\n with self._sessions_lock:\n session_ids = self.sessions.keys()\n\n return session_ids",
"def List(cls):\n\t\tres = {}\n\t\tfor p in glob.glob(\"/proc/*/cmdline\"):\n\t\t\tprocess = p.split(\"/\")[2]\n\t\t\tif cls.RE_PID.match(process):\n\t\t\t\tres[int(process)] = cat(p).replace(\"\\x00\", \" \")\n\t\treturn res",
"def get_running():\n ps = which('/usr/bin/ps') # avoid the old BSD variant\n lines = sh(ps, '-e', '-f', quiet=True)\n # The first line of the `ps' output is a header line which is\n # used to find the data field columns.\n column = lines[0].index('CMD')\n procs = set()\n for line in lines[1:]:\n cmd_line = line[column:]\n command = cmd_line.split()[0]\n procs.add(os.path.basename(command))\n return procs",
"def get_user_processes(user):\n result = []\n for process in psutil.process_iter():\n if process.username() == user:\n result.append(process.pid)\n return result",
"def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst"
] | [
"0.7804924",
"0.7730347",
"0.7697995",
"0.7590113",
"0.7520795",
"0.7483522",
"0.7411062",
"0.7385953",
"0.7380598",
"0.73299384",
"0.73251057",
"0.7316895",
"0.7233392",
"0.7152342",
"0.71504444",
"0.71448386",
"0.7116604",
"0.7027047",
"0.69536257",
"0.6934355",
"0.6899609",
"0.6830717",
"0.6713193",
"0.667902",
"0.66630995",
"0.66236854",
"0.66233766",
"0.66061366",
"0.6600933",
"0.659585"
] | 0.813076 | 0 |
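A tiny usage sketch of the get_running_processes row above, under an assumed data shape (the "pid" and "cmd" keys mirror what the surrounding snippets read, but this list itself is hypothetical): the method simply projects the "pid" field out of the tracked process records.

# hypothetical stand-in for self.processes
processes = [{"pid": 101, "cmd": "nginx"}, {"pid": 202, "cmd": "redis-server"}]
running = [p["pid"] for p in processes]  # -> [101, 202]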
Like the get_matches_commandline method: given a pattern string, match the processes on the command line, but also return the matched processes' children | def get_matches_commandline_with_children(self, match_pattern):
matched_pids = self.get_matches_commandline(match_pattern)
for matched_pid in matched_pids:
matched_pids.extend(self.get_child_processes(matched_pid))
return list(set(matched_pids)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_matches_commandline(self, match_pattern):\n\n matches = []\n for _process in self.processes:\n if re.search(match_pattern, _process[\"cmd\"]):\n matches.append(_process[\"pid\"])\n return matches",
"def find(name, arg=None):\r\n for p in get_processes():\r\n if p.name.lower().find(name.lower()) != -1:\r\n if arg is not None:\r\n for a in (p.cmdline or []):\r\n if a.lower().find(arg.lower()) != -1:\r\n return p\r\n else:\r\n return p\r\n return None",
"def findPIDs(name, user = os.getpid()):\n\n pids = []\n\n ps = subprocess.Popen(['ps', '-u', user, 'w'], stdout=subprocess.PIPE).communicate()[0]\n processes = ps.split('\\n')\n\n for line in processes:\n if len(line.split()) < 5:\n continue\n if re.match(name, line.split()[4]):\n #Then we have matching process\n pids.append(line.split()[0])\n\n return pids",
"def find(name, exact=False):\n processes = run(\"ps aux | grep {0}\".format(name))\n res = []\n for line in processes.split(\"\\n\"):\n if not line.strip():\n continue\n line = RE_SPACES.split(line, 10)\n # We skip lines that are not like we expect them (sometimes error\n # message creep up the output)\n if len(line) < 11:\n continue\n user, pid, cpu, mem, vsz, rss, tty, stat, start, time, command = line\n if (exact and command == name) \\\n or (not exact and command.startswith(name)):\n res.append(pid)\n return res",
"def get_pids(process_name, match_predicate=None):\n # default match predicate\n # why aren't we using psutil ??\n def default_predicate(target, given):\n return target.strip().lower() in given.lower()\n\n if match_predicate is None:\n match_predicate = default_predicate\n\n if process_name is None:\n raise j.exceptions.RuntimeError(\"process cannot be None\")\n if j.data.platform.is_unix():\n pids = set()\n for process in get_processes():\n try:\n pid = process.pid\n if not isinstance(pid, int):\n continue\n name = process.name()\n if match_predicate(process_name, name):\n pids.add(pid)\n elif match_predicate(process_name, process.exe()):\n pids.add(pid)\n else:\n cmdline = process.cmdline()\n if cmdline and cmdline[0]:\n if match_predicate(process_name, cmdline[0]):\n pids.add(pid)\n except (psutil.Error, FileNotFoundError):\n continue\n return list(pids)\n else:\n raise j.exceptions.NotImplemented(\"getProcessPid is only implemented for unix\")",
"def get_similar_processes():\n myprocess = get_my_process()\n result = []\n for item in psutil.process_iter():\n try:\n if item.cmdline() == myprocess.cmdline():\n result.append(item)\n except psutil.NoSuchProcess:\n pass\n return result",
"def get_process_cmdline(process_name):\n\n\tfor pretendant in execute(['ps', '-U', 'root', '-u', 'root', '-o', 'args='])[0].split(\n\t\t\t\"\\n\")[:-1]:\n\t\t#print pretendant\n\t\tif pretendant.find(process_name) != -1:\n\t\t\treturn pretendant.split(' ')",
"def get_process(proc_name):\n #LOG = log.getLogger(__name__)\n procList = []\n try:\n for pr in psutil.process_iter():\n for args in pr.cmdline():\n if proc_name in args:\n procList.append(pr.pid)\n return procList\n except BaseException as e:\n print(\"Error in fetching process: {}\".format(e))\n return None",
"def get_process_info(name):\n process_lst = list()\n all_pid = psutil.pids()\n for pid in all_pid:\n info = psutil.Process(pid)\n if name in info.name():\n process_lst.append(info)\n\n return process_lst",
"def list_processes(pid, name):\n \n if not pid and not name:\n rc, out, err = j.sal.process.execute(\"ps ax\")\n click.echo(out)\n elif name:\n click.echo(j.sal.process.psfind(name))\n elif pid:\n click.echo(j.sal.process.getProcessPid(pid))",
"def get_matching_pids(pattern):\n cmd = [\"pgrep\", \"-f\", pattern]\n rc, output, err = run_cmd_output(cmd)\n if rc == 0:\n # One or more processes matched\n pids = [int(p) for p in output.split('\\n') if p != \"\"]\n elif rc == 1:\n # No processes matched\n pids = []\n else:\n raise UserVisibleError(\"Failed to run {}\".format(\" \".join(cmd)))\n return pids",
"def pidof(process_name):\n\n\tpids = []\n\n\tif 'licornd' in process_name:\n\t\t# licorn / linux 3.x specifiq : we can match 'licornd/wmi'\n\t\t# faster than 'licornd-wmi', and in some case the 'cmdline'\n\t\t# is empty, whereas the 'comm' is not.\n\t\tnames = [ process_name, process_name.replace('/', '-') ]\n\n\telse:\n\t\tnames = [ process_name ]\n\n\tfor entry in os.listdir('/proc'):\n\t\tif entry.isdigit():\n\t\t\ttry:\n\n\t\t\t\tif cgroup and open('/proc/%s/cpuset' % entry).read().strip() != cgroup:\n\t\t\t\t\tlogging.progress(_(u'Skipped process @{0} which is not '\n\t\t\t\t\t\t\t\t\t\tu'in the same cgroup.').format(entry))\n\t\t\t\t\tcontinue\n\n\t\t\t\ttry:\n\t\t\t\t\t# Linux 3.x only\n\t\t\t\t\tcommand_line1 = open('/proc/%s/comm' % entry).read().strip()\n\t\t\t\texcept:\n\t\t\t\t\tcommand_line1 = ''\n\n\t\t\t\tcommand_line2 = open('/proc/%s/cmdline' % entry).read().strip()\n\n\t\t\t\tfor pname in names:\n\t\t\t\t\tif pname == command_line1 or pname+'\\0' in command_line2:\n\t\t\t\t\t\tpids.append(int(entry))\n\n\t\t\texcept (IOError, OSError), e:\n\t\t\t\t# in rare cases, the process vanishes during iteration. This\n\t\t\t\t# is harmless. Any other error is not cool, raise it.\n\t\t\t\tif e.errno != errno.ENOENT:\n\t\t\t\t\traise e\n\n\treturn pids",
"def find_all_child_processes(pids_only=False):\n children = psutil.Process().children(recursive=True)\n return [child.pid for child in children] if pids_only else children",
"def get_processes(sort_by_name=True):\r\n if sort_by_name:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.name, p2.name) or cmp(p1.pid, p2.pid))\r\n ))\r\n else:\r\n return sorted(_list_processes(), key=cmp_to_key(\r\n lambda p1, p2: (cmp(p1.pid, p2.pid) or cmp(p1.name, p2.name))\r\n ))",
"def setChildPIDs(self):\n\t\tpids = []\n\t\tfor proc in process_iter():\n\t\t\tfor child in self.expectedChildren:\n\t\t\t\tif child == proc.name():\n\t\t\t\t\tif proc.parent().name() == \"Python\": # Hardcoded string comparison. Sue me.\n\t\t\t\t\t\tpids.append(proc.pid)\n\t\tself.pids = pids",
"def get_children(ppid):\n\n pid_dct = {}\n for proc in build_process_list():\n proc[\"_children\"] = []\n pid_dct[proc[\"pid\"]] = proc\n\n # fill in children array\n for pid in list(pid_dct.keys()):\n parent_pid = pid_dct[pid][\"parent_pid\"]\n\n if parent_pid in pid_dct:\n pid_dct[parent_pid][\"_children\"].append(pid)\n\n # now just walk down the tree\n if ppid is None or ppid not in pid_dct:\n # process has quit, we exit\n return []\n\n accepted = []\n to_accept = collections.deque([ppid, ])\n \n while to_accept:\n head = pid_dct[to_accept.popleft()]\n\n # do not include the monitoring pid\n if head[\"pid\"] != ppid:\n accepted.append(head)\n\n to_accept.extend(head.get(\"_children\", []))\n head[\"children\"] = head[\"_children\"]\n del head[\"_children\"]\n\n # deleting children breaks infinite loops\n # but Dima, can a process tree contain a loop? yes - via race-condition in reading procfs\n\n return accepted",
"def get_children(pid):\n try:\n stdout=subprocess.check_output([\"ps\",\"--ppid\",pid,\"-o\",\"pid\"])\n except subprocess.CalledProcessError:\n stdout=[]\n\n pids=[]\n if stdout:\n pids=process_ps_stdout(stdout)\n\n return pids",
"def get_pids_filtered_by_regex(regex_list, excludes=None):\n excludes = excludes or []\n res = []\n for process in psutil.process_iter():\n try:\n cmdline = process.cmdline()\n except psutil.NoSuchProcess:\n cmdline = None\n except psutil.AccessDenied:\n cmdline = None\n if cmdline:\n name = \" \".join(cmdline)\n for r in regex_list:\n if name.strip() != \"\" and re.match(r, name):\n res.append(process.pid)\n return res",
"def get_child_pids(pid):\n\n wmi = win32com.client.GetObject('winmgmts:')\n # noinspection SqlNoDataSourceInspection,SqlDialectInspection\n children = wmi.ExecQuery('SELECT * FROM Win32_Process WHERE ParentProcessID = %s' % pid)\n return [child.Properties_('ProcessId').Value for child in children]",
"def get_pid(name: str) -> Set[int]:\n process_pids = set()\n for proc in psutil.process_iter():\n if name == proc.name():\n pid = proc.pid\n process_pids.add(pid)\n return process_pids",
"def get_child_processes(self, ppid):\n\n all_children = []\n children_to_explore = set()\n for _pid in self.parent_to_children_map[ppid]:\n all_children.append(_pid)\n children_to_explore.add(_pid)\n\n # get the children 'recursively'\n while children_to_explore: # the invariant\n child_to_explore = children_to_explore.pop()\n if not self.parent_to_children_map.get(child_to_explore):\n continue\n unvisited = self.parent_to_children_map[child_to_explore]\n for node in unvisited:\n if node not in all_children:\n children_to_explore.add(node)\n all_children.append(node)\n return list(set(all_children))",
"def get_pids(name=None):\n results = []\n for process in win32com.client.GetObject('winmgmts:').InstancesOf('Win32_Process'):\n if name is None or process.Properties_(\"Name\").Value == name:\n results.append(process.Properties_(\"ProcessID\").Value)\n return results",
"def _complete_processes(self, text):\r\n processes = []\r\n for info in self._get_complete_info():\r\n if ':' in text or info['name'] != info['group']:\r\n processes.append('%s:%s' % (info['group'], info['name']))\r\n if '%s:*' % info['group'] not in processes:\r\n processes.append('%s:*' % info['group'])\r\n else:\r\n processes.append(info['name'])\r\n return [ p + ' ' for p in processes if p.startswith(text) ]",
"def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids",
"def findProcessIdByName(processName):\n listOfProcessObjects = []\n # Iterate over the all the running process\n for proc in psutil.process_iter():\n try:\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"create_time\"])\n # Check if process name contains the given name string.\n if processName.lower() in pinfo[\"name\"].lower():\n listOfProcessObjects.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n return listOfProcessObjects",
"def _psa(cmd, allmatching=True, paths=None):\n import psutil\n pids = list()\n cmdlines = list()\n procs = list()\n cmdline = ''\n bins = _whicha(cmd, paths)\n if not allmatching:\n bins = bins[:1]\n for pid in psutil.pids():\n try:\n proc = psutil.Process(pid)\n cmdline = proc.cmdline()\n if any([bin in cmdline for bin in bins]):\n cmdlines.append(cmdline)\n pids.append(pid)\n procs.append(proc)\n except psutil.ZombieProcess:\n pass\n except psutil.AccessDenied:\n pass\n return (pids, cmdlines, procs)",
"def ps_find(name):\n for proc in psutil.process_iter():\n if proc.name() == name:\n return True\n return False",
"def get_process_id(name):\n child = subprocess.Popen(['pgrep', '-f', name], stdout=subprocess.PIPE, shell=False)\n response = child.communicate()[0]\n return [int(pid) for pid in response.split()]",
"def kill_process_by_name(re_pattern):\n\n user_name = os.getlogin()\n parent_pid = os.getppid()\n current_pid = os.getpid()\n\n stdin = subprocess.check_output([\"ps\", \"-u\", user_name])\n\n processes = []\n\n processes = [(int(re.match(\" *[0-9]+\", line).group()), line.split(' ')[-1]) for line in stdin.split('\\n')[1:-1]]\n\n for process in processes:\n\n if re.match(re_pattern, process[1]) and process[0] != current_pid:\n# print \"KILLING PID: \", process\n os.kill(process[0], signal.SIGKILL)",
"def ProcessIterator(pids, process_regex_string, cmdline_regex_string,\n ignore_grr_process, error_list):\n pids = set(pids)\n if ignore_grr_process:\n grr_pid = psutil.Process().pid\n else:\n grr_pid = -1\n\n if process_regex_string:\n process_regex = re.compile(process_regex_string)\n else:\n process_regex = None\n\n if cmdline_regex_string:\n cmdline_regex = re.compile(cmdline_regex_string)\n else:\n cmdline_regex = None\n\n if pids:\n process_iterator = []\n for pid in pids:\n try:\n process_iterator.append(psutil.Process(pid=pid))\n except Exception as e: # pylint: disable=broad-except\n error_list.Append(\n rdf_memory.ProcessMemoryError(\n process=rdf_client.Process(pid=pid), error=str(e)))\n else:\n process_iterator = psutil.process_iter()\n\n for p in process_iterator:\n if process_regex and not process_regex.search(p.name()):\n continue\n\n if cmdline_regex and not cmdline_regex.search(\" \".join(p.cmdline())):\n continue\n\n if p.pid == grr_pid:\n continue\n\n yield p"
] | [
"0.69588715",
"0.63922125",
"0.61809975",
"0.6156374",
"0.6085242",
"0.602419",
"0.59251225",
"0.5850459",
"0.58301437",
"0.58174837",
"0.58165544",
"0.5804579",
"0.58037686",
"0.57153285",
"0.56987065",
"0.5696199",
"0.5649648",
"0.5642261",
"0.55726624",
"0.5563265",
"0.55540144",
"0.55440676",
"0.5525824",
"0.5525218",
"0.55177826",
"0.54925454",
"0.53414667",
"0.5338843",
"0.53336465",
"0.5264208"
] | 0.75858635 | 0 |
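For comparison with the get_matches_commandline_with_children row above, here is a minimal standalone sketch of the same idea: match processes whose command line matches a pattern, then fold in every descendant PID. It assumes the third-party psutil package and a hypothetical function name; the original class instead walks its own parent-to-children map.

import re

import psutil


def match_pids_with_children(pattern: str) -> list:
    # Collect PIDs whose command line matches the pattern, plus all descendants.
    regex = re.compile(pattern)
    matched = set()
    for proc in psutil.process_iter(["pid", "cmdline"]):
        try:
            cmdline = " ".join(proc.info["cmdline"] or [])
            if regex.search(cmdline):
                matched.add(proc.info["pid"])
                for child in proc.children(recursive=True):  # whole subtree
                    matched.add(child.pid)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            continue
    return list(matched)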
For a process, record the metrics in a historical metrics collector. Collects the historical result of each metric per process in __metrics_history | def record_metrics(self, pid, metrics):
for _metric, _metric_value in metrics.items():
if not self.__metrics_history[pid].get(_metric):
self.__metrics_history[pid][_metric] = []
self.__metrics_history[pid][_metric].append(_metric_value)
# only keep the last 2 running history for any metric
self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][
-2:
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def gather_sample(self):\n\n for _pid in self._select_processes():\n if not self.__trackers.get(_pid):\n self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)\n\n self._reset_absolute_metrics()\n\n for _tracker in self.__trackers.values():\n _metrics = _tracker.collect()\n self.record_metrics(_tracker.pid, _metrics)\n\n self._calculate_aggregated_metrics()\n self._remove_dead_processes()\n\n self.print_metrics()",
"def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0",
"def addMonitoring(process):\n import FWCore.ParameterSet.Config as cms\n \n process.SimpleMemoryCheck = cms.Service(\"SimpleMemoryCheck\",\n jobReportOutputOnly = cms.untracked.bool(True)\n )\n process.Timing = cms.Service(\"Timing\",\n summaryOnly = cms.untracked.bool(True)\n )\n \n return process",
"def generate_history(self):\n self.reporter.generate()",
"def register_process_statistics():\n if resource is None:\n log.warning(\n 'Unable to import resource module, memory diags not available'\n )\n return\n\n rusage_fields = [\n ('Execution time in user mode (seconds)', 'ru_utime'),\n ('Execution time in kernel mode (seconds)', 'ru_stime'),\n ('Maximum Resident Set Size (KB)', 'ru_maxrss'),\n ('Soft page faults', 'ru_minflt'),\n ('Hard page faults', 'ru_majflt'),\n ('Input events', 'ru_inblock'),\n ('Output events', 'ru_oublock'),\n ('Voluntary context switches', 'ru_nvcsw'),\n ('Involuntary context switches', 'ru_nivcsw'),\n ]\n\n def dump(log):\n process = resource.getrusage(resource.RUSAGE_SELF)\n for name, field in rusage_fields:\n data = getattr(process, field, 'None')\n log.info('%s: %s', name, data)\n\n register_diags('Process Statistics', dump)",
"def processStats(self):\n return self._processes.itervalues()",
"def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")",
"def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector",
"def compute_metrics(self):\n pass",
"def accumulateSubgridMassHistory(self,q):\n pass",
"def get_historical_route_metrics():\n\n db = db_session.get_db_read_replica()\n with db.scoped_session() as session:\n return _get_historical_route_metrics(session)",
"def mymetrics(): \n _update_metric_counters()\n logging.debug(prom_objects_seen.collect())\n return flask.Response(generate_latest(), mimetype='text/plain')",
"def metrics(self):\n raise NotImplementedError(\"metrics\")",
"def metrics(self, metrics):\n\n self._metrics = metrics",
"def test_get_derived_metric_history(self):\n pass",
"def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)",
"def _measure(\n self, process: multiprocessing.Process\n ) -> Tuple[float, List[ResourceStats], bool]:\n started_time = time.time()\n is_killed = False\n proc_info = psutil.Process(process.pid)\n stats = []\n\n with timeit_context() as timeit:\n while process.is_alive():\n if time.time() - started_time > self.timeout:\n is_killed = True\n break\n stats.append(self._get_stats_record(proc_info))\n\n time.sleep(self.period)\n\n if is_killed:\n process.terminate()\n\n process.join()\n time_usage = timeit.time_passed\n\n return time_usage, stats, is_killed",
"def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections",
"def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)",
"def get_metric_history(self, metric):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/managedfolders/%s/metrics/history\" % (self.project_key, self.odb_id),\n params={'metricLookup' : metric if isinstance(metric, str) or isinstance(metric, unicode) else json.dumps(metric)})",
"def save_to_history(self):\n for stat_type in self.log_book.keys():\n stat = self.get_stat(stat_type)\n self.history[stat_type].append(stat)\n self.init_stat()",
"def updateProcess(self, machine, process):\n\n stamp = time.time() - self.initTime\n if machine in self.activity.keys():\n if ((\"processes\" in self.activity[machine].keys()) and \n (process in self.activity[machine][\"processes\"].keys())):\n self.activity[machine][\"processes\"][process].append(stamp)\n else:\n self.activity[machine][\"processes\"] = {process : [stamp]}\n else:\n self.activity[machine] = {\"filtered activity\" : [],\n \"raw activity\" : [],\n \"time\" : [],\n \"processes\" : {process : [stamp]}}",
"def _get_stats_record(proc_info: psutil.Process) -> ResourceStats:\n return ResourceStats(\n time.time(),\n proc_info.cpu_percent(),\n memory_profiler.memory_usage(proc_info.pid, max_usage=True),\n )",
"def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)",
"def log_metrics(self, metrics: dict):\n self.metrics.update(metrics)\n\n self._sync_log_event()",
"def agg_history(self):\n cd_list, cr_list = zip(*self._history)\n return pd.concat(cd_list), pd.concat(cr_list)",
"def record_apdex_metrics(self, metrics):\n\n if not self.__settings:\n return\n\n for metric in metrics:\n self.record_apdex_metric(metric)",
"def appendProcessingHistoryItem(context, item):\n projectDir = context.projectDir\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps'])\n except KeyError:\n idx = 0\n idx += 1\n \n idxStr = str(idx)\n key = GenericMetadata.HISTORY_PROTO + idxStr\n GenericMetadata._writeEntriesToSection(projectDir, GenericMetadata.HISTORY_SECTION, [key, 'numsteps'], [item, idxStr])"
] | [
"0.6638309",
"0.61203325",
"0.6019831",
"0.59060955",
"0.59058285",
"0.55844504",
"0.54852",
"0.54657155",
"0.53771794",
"0.53647846",
"0.5356617",
"0.5345713",
"0.5341496",
"0.53098404",
"0.52967745",
"0.5279151",
"0.5278557",
"0.52188367",
"0.52059686",
"0.51821625",
"0.5178401",
"0.5152216",
"0.514435",
"0.5143143",
"0.5109523",
"0.5090498",
"0.5074378",
"0.5074166",
"0.5062749",
"0.506044"
] | 0.7013191 | 0 |
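A minimal sketch of the record-then-diff pattern described in the record_metrics row above, outside the class: keep at most the two most recent samples per (pid, metric) so a later aggregation pass can take the delta for cumulative metrics. The names here (record, metrics_history) are illustrative, not the original module's.

from collections import defaultdict

metrics_history = defaultdict(dict)  # pid -> {metric name: [older, newest]}


def record(pid, samples):
    for name, value in samples.items():
        history = metrics_history[pid].setdefault(name, [])
        history.append(value)
        del history[:-2]  # retain at most the two most recent samples


record(1234, {"cpu_ticks": 100, "rss_bytes": 2048})
record(1234, {"cpu_ticks": 180, "rss_bytes": 4096})
cpu_delta = metrics_history[1234]["cpu_ticks"][-1] - metrics_history[1234]["cpu_ticks"][-2]  # 80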
At the beginning of each process metric calculation, the absolute (non-cumulative) metrics need to be overwritten with the combined result of the running process(es). Only the cumulative metrics need the previous value to calculate a delta. We should reset the absolute metrics to 0 at the beginning of this "epoch" | def _reset_absolute_metrics(self):
for pid, process_metrics in self.__metrics_history.items():
for _metric, _metric_values in process_metrics.items():
if not _metric.is_cumulative:
self.__aggregated_metrics[_metric] = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _calculate_aggregated_metrics(self):\n\n # using the historical values, calculate the aggregate\n # there are two kinds of metrics:\n # a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)\n # b) absolute metrics - the last absolute value is used\n\n running_pids_set = set(self.__pids)\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not self.__aggregated_metrics.get(_metric):\n self.__aggregated_metrics[_metric] = 0\n if _metric.is_cumulative:\n if pid in running_pids_set:\n if len(_metric_values) > 1:\n # only report the cumulative metrics for more than one sample\n self.__aggregated_metrics[_metric] += (\n _metric_values[-1] - _metric_values[-2]\n )\n else:\n if pid in running_pids_set:\n # absolute metric - accumulate the last reported value\n self.__aggregated_metrics[_metric] += _metric_values[-1]",
"def _momentum_update(self):\n for param_ol, param_tgt in zip(self.online_net.parameters(),\n self.target_net.parameters()):\n param_tgt.data = param_tgt.data * self.momentum + \\\n param_ol.data * (1. - self.momentum)",
"def getCummulativeValues(self):\n self.cumulativePhaseHeightInRing1 = np.cumsum(self.phaseHeightInRing1)\n self.cumulativePhaseHeightInRing2 = np.cumsum(self.phaseHeightInRing2)\n self.cumulativeLeftCriticalPointsRing1 = np.cumsum(self.leftCriticalPointsRing1)\n self.cumulativeRightCriticalPointsRing1 = np.cumsum(self.rightCriticalPointsRing1)\n self.cumulativeLeftCriticalPointsRing2 = np.cumsum(self.leftCriticalPointsRing2)\n self.cumulativeRightCriticalPointsRing2 = np.cumsum(self.rightCriticalPointsRing2)\n\n if(self.init1 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing1):\n self.cumulativeLeftCriticalPointsRing1[index] = value + self.init1\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing1):\n self.cumulativeRightCriticalPointsRing1[index] = value + self.init1\n\n if(self.init2 > 0):\n for index, value in enumerate(self.cumulativeLeftCriticalPointsRing2):\n self.cumulativeLeftCriticalPointsRing2[index] = value + self.init2\n for index, value in enumerate(self.cumulativeRightCriticalPointsRing2):\n self.cumulativeRightCriticalPointsRing2[index] = value + self.init2\n\n self.cumulativePhaseHeightInRing1 = np.insert(self.cumulativePhaseHeightInRing1, 0, 0.0)\n self.cumulativePhaseHeightInRing2 = np.insert(self.cumulativePhaseHeightInRing2, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing1 = np.insert(self.cumulativeLeftCriticalPointsRing1, 0, 0.0)\n self.cumulativeRightCriticalPointsRing1 = np.insert(self.cumulativeRightCriticalPointsRing1, 0, 0.0)\n self.cumulativeLeftCriticalPointsRing2 = np.insert(self.cumulativeLeftCriticalPointsRing2, 0, 0.0)\n self.cumulativeRightCriticalPointsRing2 = np.insert(self.cumulativeRightCriticalPointsRing2, 0, 0.0)",
"def calculate_epoch_metrics(self, val_metrics=False):\n metric_names = self.tracked_metrics()\n\n for metric in metric_names:\n if val_metrics:\n mean_val = np.array(self.metrics_history[f\"val_{metric}\"][\"batch_vals\"]).mean()\n self.metrics_history[f\"val_{metric}\"][\"epoch_vals\"].append(mean_val)\n else:\n mean_val = np.array(self.metrics_history[metric][\"batch_vals\"]).mean()\n self.metrics_history[metric][\"epoch_vals\"].append(mean_val)",
"def ModifyInitialProperties(self):\n super().ModifyInitialProperties()\n for aux_process in self.project_parameters[\"processes\"][\"auxiliar_process_list\"]:\n if aux_process[\"python_module\"].GetString() == \"temporal_statistics_process\":\n aux_process[\"Parameters\"][\"statistics_start_point_control_value\"].SetDouble(self.project_parameters[\"problem_data\"][\"burnin_time\"].GetDouble())",
"def compute_values(self, update_statistics=False):\n\n self.compute_iterations()\n self.axsec = sum([one.axsec for one in self])\n self.xsec = sum([one.xsec for one in self])\n self.xerrc = sum([one.xerrc for one in self])\n self.xerru = math.sqrt(sum([one.xerru**2 for one in self]))\n\n self.nevents = sum([one.nevents for one in self])\n self.nw = sum([one.nw for one in self])\n self.maxit = len(self.yerr_iter) # \n self.nunwgt = sum([one.nunwgt for one in self]) \n self.wgt = 0\n self.luminosity = min([0]+[one.luminosity for one in self])\n if update_statistics:\n self.run_statistics.aggregate_statistics([_.run_statistics for _ in self])",
"def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n #would be nice to have dt^{n+1} alone\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#figure this out\n #mwf debug\n logEvent(\"HaukeSangalliTrackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n #mwf should be\n q[('mt',ci)] -= self.subgridTmp[ci]\n #don't think this matters right now because called after calculateSubgridError\n self.subgridTmp_ip[ci][:] = self.subgridError_ip_last[ci]\n self.subgridTmp_ip[ci] *= dtInv\n self.subgridTmp_ip[ci] *= self.subgridErrorMassCoef_ip_last[ci]#figure this out\n self.cip[('mt',ci)] -= self.subgridTmp_ip[ci]",
"def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"ADR trackSubScales accumulating delta u^n.abs.max= %s dm.max=%s \" % (max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),10)\n\n q[('mt',ci)] -= self.subgridTmp[ci]",
"def processed_cum_overall(self):\n self.processed_cum_overall = (\n self.cumulative_stats_for_team_each_year\n [['Season','TeamID','win_rate','total_score','total_opponent_score','fgp','fg3p','ftp', 'total_rebounds','total_off_rebounds','total_def_rebounds',\n 'total_off_rebounds_percent','total_def_rebounds_percent','total_rebound_possession_percent','total_rebound_possessiongain_percent','total_blocks',\n 'total_assists','total_steals','total_turnover','total_personalfoul','total_block_opp_FGA_percent','total_assist_per_fgm','total_assist_turnover_ratio',\n 'expectation_per_game','avg_lose_score_by','avg_win_score_by']]\n )",
"def calculate_batch_metrics(self):\n pass",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def accumulateSubgridMassHistory(self,q):\n if self.trackSubScales:\n for ci in range(1,self.nc):\n self.subgridTmp[ci][:] = self.subgridError_last[ci]\n dt = self.timeIntegration.dt\n assert dt > 0.0\n dtInv = old_div(1.0,dt)\n self.subgridTmp[ci] *= dtInv\n self.subgridTmp[ci] *= self.subgridErrorMassCoef_last[ci]#decide how to approximate\n logEvent(\"NS_ASGS trackSubScales accumulating delta u^n ci=%s .abs.max= %s dm.max=%s \" % (ci,max(numpy.absolute(self.subgridTmp[ci].flat)),\n max(numpy.absolute(self.subgridErrorMassCoef_last[ci].flat))),1)\n\n q[('mt',ci)] -= self.subgridTmp[ci]",
"def calc_stat_values(self):",
"def PostTrainingStepUpdate(self):\n p = self.params\n # Get sufficient stats that accumulates over microbatches.\n counts = self.accumulators.counts.GetValue()\n mean_ss = self.accumulators.mean_ss.GetValue()\n variance_ss = self.accumulators.variance_ss.GetValue()\n # Compute batch mean and batch variance from sufficient stats\n mean, variance = tf.nn.normalize_moments(counts, mean_ss, variance_ss, None)\n decay = tf.convert_to_tensor(1.0 - p.decay, p.dtype)\n # Update moving_mean, moving_variance from batch mean and batch variance.\n with tf.name_scope(p.name) as scope:\n with tf.ops.colocate_with(self.vars.moving_mean):\n mean_update = tf.assign_sub(\n self.vars.moving_mean,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_mean - tf.cast(mean, p.dtype)) * decay,\n tf.zeros_like(self.vars.moving_mean)),\n name='moving_mean_update')\n with tf.ops.colocate_with(self.vars.moving_variance):\n var_update = tf.assign_sub(\n self.vars.moving_variance,\n tf.where(\n tf.greater(counts, 0.5),\n (self.vars.moving_variance - tf.cast(variance, p.dtype)) *\n decay, tf.zeros_like(self.vars.moving_variance)),\n name='moving_variance_update')\n py_utils.CheckNumerics(\n self.vars.moving_mean,\n 'moving mean of {} failed numeric check'.format(scope))\n py_utils.CheckNumerics(\n self.vars.moving_variance,\n 'moving variance of {} failed numeric check'.format(scope))\n self.accumulators.counts.Reset()\n self.accumulators.mean_ss.Reset()\n self.accumulators.variance_ss.Reset()\n return tf.group(mean_update, var_update)",
"def on_epoch_begin(self, epoch, logs={}):\n self.current_progress = 0\n self.loss = 0\n self.accuracy = 0",
"def calc(self):\n self.proc_blocks = [cluster.cells for cluster in self.clusters]\n self.cell_loads = [sum([len(cell) for cell in self.proc_blocks])]\n self.particle_loads = [cluster.np for cluster in self.clusters]\n self.imbalance = LoadBalancer.get_load_imbalance(self.particle_loads)",
"def compute_audit(self):\r\n \r\n time = datetime.now()\r\n H0_dist = []\r\n Ha_dist = []\r\n\r\n for i in range(0, self.m):\r\n #print(\"CURRENT H0 dist: \", H0_dist)\r\n #try:\r\n H0_dist = self.next_round_dist(True, H0_dist, i)\r\n Ha_dist = self.next_round_dist(False, Ha_dist, i)\r\n '''\r\n except Exception as e:\r\n \r\n print(e)\r\n self.bad = H0_dist\r\n self.bad2 = Ha_dist\r\n return\r\n '''\r\n self.decide_k_min(H0_dist, Ha_dist, i)\r\n #print('ROUND INDEX: ',i,'kminschedl: ',self.k_min_sched[i])\r\n\r\n #self.truncate_dist(H0_dist, i)\r\n H0_dist = H0_dist[:self.k_min_sched[i]]\r\n #self.truncate_dist(Ha_dist, i)\r\n Ha_dist = Ha_dist[:self.k_min_sched[i]]\r\n \r\n #print(\"The outputs: k_mins, LR denominator, LR numerator, 1 / LR (or alpha').\")\r\n #print(self.k_min_sched, '\\n', self.pr_H0_sched, '\\n', self.pr_Ha_sched, '\\n', \r\n #self.risk_sched)\r\n #print(\"Output suppressed. Use instance variables k_min_sched, pr_H0_sched, pr_Ha_sched, risk_sched\")\r\n\r\n #print(\"Time elapsed:\", datetime.now() - time)\r",
"def __apply_accumulators():\n self.__xdata = np.array([])\n self.__ydata = np.array([])\n for acc in self.signal_accumulators:\n self.__xdata = __array_append(self.__xdata,acc.attempt)\n self.__ydata = __array_append(self.__ydata,acc.count)\n self.__applied = True",
"def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")",
"def __reset(self):\n\t\tself.__highest = -float('inf')\n\t\tself.__lowest = float('inf')\n\t\tself.__total = 0\n\t\tself.__steps = 0\n\t\tself.__cold_days = 0",
"def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)",
"def precalculate():\n pass",
"def precalculate():\n pass",
"def compute_metrics(self):\n pass",
"def office_calculate_cumulative_misfit(parser, args, params):\n local_args = parser.parse_known_args(args)\n control.calculate_cumulative_misfit(params)",
"def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints",
"def reduce_run():",
"def calculate_before_process(self):\n typ = self.duration.get('type')\n val = self.duration.get('value')\n\n if self.process == \"Like\":\n if typ == \"by_time\":\n self.media_to_like = round(val*self.limits_per_hour.get('like'))\n elif typ == \"by_likes\":\n self.media_to_like = round(val)\n\n elif self.process == \"Like-and-follow\":\n if typ == \"by_time\":\n self.users_to_follow = round(val*self.limits_per_hour.get('follow'))\n elif typ == \"by_users\":\n self.users_to_follow = round(val)",
"def calculate(self):\n #runs = [ai\n # for ei in self.experiment_queues\n # for ai in ei.cleaned_automated_runs]\n #\n #ni = len(runs)\n #self.nruns = ni\n # for ei in self.experiment_queues:\n # dur=ei.stats.calculate_duration(ei.cleaned_automated_runs)\n # if\n\n\n tt = sum([ei.stats.calculate_duration(ei.cleaned_automated_runs)\n for ei in self.experiment_queues])\n self._total_time = tt\n offset = 0\n if self._start_time:\n offset = time.time() - self._start_time\n\n self.etf = self.format_duration(tt - offset)",
"def preinitialize(self):\n for group in self.param_groups:\n for p in group['params']:\n if group['momentum'] != 0:\n self.state[p][\"momentum_buffer\"] = torch.zeros_like(\n p, device=\"cpu\"\n ).to(p.device)"
] | [
"0.66141224",
"0.5771144",
"0.57508063",
"0.5575068",
"0.5574594",
"0.55501413",
"0.5535069",
"0.54281497",
"0.53845894",
"0.535291",
"0.5337103",
"0.5333742",
"0.5300862",
"0.5274768",
"0.5269434",
"0.52690667",
"0.52612674",
"0.5259409",
"0.52576226",
"0.5246559",
"0.52445793",
"0.52266544",
"0.52266544",
"0.52198565",
"0.5217178",
"0.5192304",
"0.5188704",
"0.5176454",
"0.5167211",
"0.5157337"
] | 0.75371194 | 0 |
Calculates the aggregated metric values based on the currently running processes and the historical metric record | def _calculate_aggregated_metrics(self):
# using the historical values, calculate the aggregate
# there are two kinds of metrics:
# a) cumulative metrics - only the delta of the last 2 recorded values is used (eg cpu cycles)
# b) absolute metrics - the last absolute value is used
running_pids_set = set(self.__pids)
for pid, process_metrics in self.__metrics_history.items():
for _metric, _metric_values in process_metrics.items():
if not self.__aggregated_metrics.get(_metric):
self.__aggregated_metrics[_metric] = 0
if _metric.is_cumulative:
if pid in running_pids_set:
if len(_metric_values) > 1:
# only report the cumulative metrics for more than one sample
self.__aggregated_metrics[_metric] += (
_metric_values[-1] - _metric_values[-2]
)
else:
if pid in running_pids_set:
# absolute metric - accumulate the last reported value
self.__aggregated_metrics[_metric] += _metric_values[-1] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_metrics(self):\n pass",
"def _reset_absolute_metrics(self):\n\n for pid, process_metrics in self.__metrics_history.items():\n for _metric, _metric_values in process_metrics.items():\n if not _metric.is_cumulative:\n self.__aggregated_metrics[_metric] = 0",
"def aggregate(global_params, running_aggregate, aggregation_result):\n running_ref = running_aggregate.get_ref('values')\n agg_ref = aggregation_result.get_ref('values')\n for i in range(global_params.dims):\n running_ref[i] += agg_ref[i]\n return running_aggregate",
"def calc_stat_values(self):",
"def calculate_batch_metrics(self):\n pass",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def calculate(self, data, *args, **kwargs):\n \n # Sets up priority queue, where data is prioritized by date\n queue = []\n \n # Sets up data dictionaries that will be used to contain calculated data\n severity_data = OrderedDict()\n status_data = OrderedDict()\n current_state = { }\n \n # List of fields used\n fields = [PROJECT, TRANS, STATUS, PRIORITY]\n \n # Populates priority queue with appropriate data\n for key, param_data in data.iteritems():\n # Grabs param_data fields\n priority = param_data.get(PRIORITY, None)\n hist = param_data.get(HIST, None)\n proj = param_data.get(PROJECT, self.project)\n \n # Adds the historical statuses of the current JIRA item to the queue\n if (hist):\n for i, date in enumerate(hist[TRANS]):\n heapq.heappush(queue, (date, proj, key, hist[NEW][i], priority))\n \n # Iterates through dates to populate status and severity data dictionaries\n if (queue):\n earliest = queue[0][0]\n for date in get_historical_dates(earliest, self.extraction_day, False):\n # Pops items off queue until queue is empty or date limit is reached\n while(queue and queue[0][0].date() <= date):\n curr, proj, key, status, priority = heapq.heappop(queue)\n \n # Maps the key's current parameters, overwriting previous mapping\n current_state[key] = { }\n for field, value in zip(fields, [proj, curr, status, priority]):\n current_state[key][field] = value\n \n # Sets severity and status metric data at the given date\n severity_data[date] = self._get_severity_data(current_state)\n status_data[date] = self._get_status_data(current_state)\n \n # Gets age data separately from status and severity\n age_map = self._get_average_age_data(data)\n \n return severity_data, status_data, age_map",
"def _compute_running_metrics(self,\n model_output: torch.Tensor,\n batch: Tuple[torch.Tensor, torch.Tensor],\n running_metrics: dict) -> None:\n for metric in self.metrics:\n if metric.__name__ == 'word_error_rate' or metric.__name__ == 'character_error_rate':\n metric_result = metric(model_output, batch, self.decoder)\n else:\n metric_result = metric(model_output, batch)\n if type(metric_result) == torch.Tensor:\n metric_result = metric_result.item()\n\n running_metrics[metric.__name__].append(metric_result)",
"def calculate_metrics(self):\n self.data_stats = self.sqlContext.read.format(\"org.apache.spark.sql.cassandra\").options(table=self.cassandra_trip_table, keyspace=self.cassandra_keyspace).load()\n self.data_stats = self.data_stats.groupBy(['time_block','day','month','borough_name']).agg(func.avg('num_trips').alias('mean'))",
"def accumulateSubgridMassHistory(self,q):\n pass",
"def aggregate_stats(self):\n if self.split_bn.track_running_stats:\n (\n self.bn.running_mean.data,\n self.bn.running_var.data,\n ) = self._get_aggregated_mean_std(\n self.split_bn.running_mean,\n self.split_bn.running_var,\n self.num_splits,\n )",
"def compute_metrics(self, results: list) -> dict:",
"def aggregate_statistics(self, new_stats):\n \n if isinstance(new_stats,RunStatistics):\n new_stats = [new_stats, ]\n elif isinstance(new_stats,list):\n if any(not isinstance(_,RunStatistics) for _ in new_stats):\n raise MadGraph5Error, \"The 'new_stats' argument of the function \"+\\\n \"'updtate_statistics' must be a (possibly list of) \"+\\\n \"RunStatistics instance.\"\n \n keys = set([])\n for stat in [self,]+new_stats:\n keys |= set(stat.keys())\n\n new_stats = new_stats+[self,]\n for key in keys:\n # Define special rules\n if key=='max_precision':\n # The minimal precision corresponds to the maximal value for PREC\n self[key] = min( _[key] for _ in new_stats if key in _)\n elif key=='min_precision':\n # The maximal precision corresponds to the minimal value for PREC\n self[key] = max( _[key] for _ in new_stats if key in _)\n elif key=='averaged_timing':\n n_madloop_calls = sum(_['n_madloop_calls'] for _ in new_stats if\n 'n_madloop_calls' in _)\n if n_madloop_calls > 0 :\n self[key] = sum(_[key]*_['n_madloop_calls'] for _ in \n new_stats if (key in _ and 'n_madloop_calls' in _) )/n_madloop_calls\n else:\n # Now assume all other quantities are cumulative\n self[key] = sum(_[key] for _ in new_stats if key in _)",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def compute(self) -> Any:\n # ddp hotfix, could be done better\n # but metric must handle DDP on it's own\n if self._ddp_backend == \"xla\":\n device = get_device()\n for key in self.statistics:\n key_statistics = torch.tensor([self.statistics[key]], device=device)\n key_statistics = xm.all_gather(key_statistics).sum(dim=0).cpu().numpy()\n self.statistics[key] = key_statistics\n elif self._ddp_backend == \"ddp\":\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n if self.compute_per_class_metrics:\n return per_class, micro, macro, weighted\n else:\n return [], micro, macro, weighted",
"def record_metrics(self, pid, metrics):\n\n for _metric, _metric_value in metrics.items():\n if not self.__metrics_history[pid].get(_metric):\n self.__metrics_history[pid][_metric] = []\n self.__metrics_history[pid][_metric].append(_metric_value)\n # only keep the last 2 running history for any metric\n self.__metrics_history[pid][_metric] = self.__metrics_history[pid][_metric][\n -2:\n ]",
"def compute(self) -> Any:\n per_class, micro, macro, weighted = get_aggregated_metrics(\n tp=self.statistics[\"tp\"],\n fp=self.statistics[\"fp\"],\n fn=self.statistics[\"fn\"],\n support=self.statistics[\"support\"],\n zero_division=self.zero_division,\n )\n return per_class, micro, macro, weighted",
"def calculate_dataset_metrics(self):\n pass",
"def calculate(self):\n avg = self.sum / self.n if self.n != 0 else 0\n self.running_avg.append(avg)\n return avg",
"def compute_key_value(self) -> Dict[str, float]:\n # @TODO: ddp hotfix, could be done better\n if self._is_ddp:\n for key in self.statistics:\n value: List[np.ndarray] = all_gather(self.statistics[key])\n value: np.ndarray = np.sum(np.vstack(value), axis=0)\n self.statistics[key] = value\n\n per_class, micro, macro, weighted = self.compute()\n metrics = self._convert_metrics_to_kv(\n per_class=per_class, micro=micro, macro=macro, weighted=weighted\n )\n return metrics",
"def read_metric_values(self):\n inv_objs = self._inventory_mgr.current_inventory()\n monitored_metrics = self._metric_mgr.get_monitored_metrics()\n perf_manager = self._si.RetrieveServiceContent().perfManager\n for mor in inv_objs.keys():\n for inv_obj in inv_objs[mor]:\n inv_obj_metrics = inv_obj.metric_id_map\n desired_keys = list(set(inv_obj_metrics.keys()) & set(monitored_metrics[mor].keys()))\n if not len(desired_keys) == 0:\n metric_id_objs = [inv_obj_metrics[key] for key in desired_keys]\n query_spec = vim.PerformanceManager.QuerySpec(\n entity=inv_obj.mor, metricId=metric_id_objs,\n intervalId=inv_obj.INSTANT_INTERVAL,\n maxSample=1, format='normal'\n )\n try:\n results = perf_manager.QueryPerf(querySpec=[query_spec])\n except Exception as e:\n self._logger.error(\"Exception while making performance query : {0}\".format(e))\n if results:\n dps = self._parse_query(inv_obj, results, monitored_metrics[mor])\n payload = self._build_payload(dps)\n self._dispatch_metrics(payload)\n else:\n self._logger.warning(\"Empty result from query : {0}\".format(query_spec))",
"def compute_statistics(self):",
"def sm_measure_current(self,num_readings=1):\n self.sm.set_measurement_function(\"CURRENT\")\n self.sm.format_readings(\"CURRENT\")\n ret = average(self.sm.take_measurement(num_readings))\n self.sm_restore_display\n return ret",
"def prepareAccumulatedMetrics(self):\n displayDF = analyzeMetricsDF(self.resultList)\n displayDF.to_csv(\"data/results.csv\")",
"def metrics_group():",
"def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }",
"def calculate_agrigate(self):\n self.total = 0.0\n for rec in self.data:\n self.total = self.total + rec[\"value\"]\n\n self.agrigate_data = {\n \"site\": self.site,\n \"utc\": self.timestamp_utc,\n \"local\": self.timestamp_local,\n \"tag\": \"TOTAL\",\n \"value\": round(self.total, 3)}\n self.data.append(self.agrigate_data)",
"def calculate(self):\n self.results['max'] = numpy.max(self.data)\n self.results['min'] = numpy.min(self.data)\n if self.type == 0:\n self.group_discrete_data()\n if self.type == 1:\n self.group_continuous_data()\n\n self.results['arithAvg'] = self.average([self.data[i] * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences)\n self.results['quadAvg'] = math.sqrt(\n self.average([(self.data[i] * self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n if self.results['min'] > 0:\n self.results['geoAvg'] = math.exp(\n self.average([numpy.log(self.data[i]) * self.occurrences[i] for i in range(len(self.data))],\n self.totalOccurrences))\n self.results['harmAvg'] = 1 / self.average(\n [(self.occurrences[i] / self.data[i]) for i in range(len(self.data))],\n self.totalOccurrences)\n else:\n self.results['geoAvg'] = self.results['harmAvg'] = \"N/A\"\n self.results['momentsR'] = self.moments(self.data, self.occurrences, 4)\n self.results['centralMomentsR'] = self.moments([(i - self.results['arithAvg']) for i in self.data],\n self.occurrences, 4)\n self.results['std'] = self.average(\n [self.occurrences[i] * abs(self.data[i] - self.results['arithAvg']) for i in range(len(self.data))],\n self.totalOccurrences)",
"def __calculate_agg_shap_scores(self):\n self.agg_stats_timer = SimbaTimer(start=True)\n for clf_state, clf_state_name in zip(range(2), [\"ABSENT\", \"PRESENT\"]):\n self.results = {}\n self.df_save_path = os.path.join(\n self.shap_logs_path,\n \"SHAP_summary_{}_{}_{}.csv\".format(\n self.classifier_name, clf_state_name, self.datetime\n ),\n )\n shap_clf_sliced = self.shap_df[\n self.shap_df[self.classifier_name] == clf_state\n ]\n for feature_category, feature_time_bin in itertools.product(\n self.unique_feature_category_names, self.unique_time_bin_names\n ):\n if feature_category not in self.results.keys():\n self.results[feature_category] = {}\n feature_names_sliced = list(\n self.feature_categories_df.loc[\n :, (feature_category, feature_time_bin)\n ]\n )\n feature_names_sliced = [\n x\n for x in feature_names_sliced\n if str(x) != \"nan\" and x in shap_clf_sliced\n ]\n self.results[feature_category][feature_time_bin] = round(\n shap_clf_sliced[feature_names_sliced].sum(axis=1).mean() * 100, 6\n )\n self.__save_aggregate_scores()\n self.agg_stats_timer.stop_timer()\n self.visualization_timer = SimbaTimer(start=True)\n\n stdout_success(\n msg=f\"Aggregate SHAP statistics saved in {self.shap_logs_path} directory\",\n elapsed_time=self.agg_stats_timer.elapsed_time_str,\n )",
"def _calculate(self):\n source = self.source\n res = {}\n l_cols = [[], [], [], []]\n r_lines = {}\n dateline=None\n ###delete the below code when fetch data from database(assume: data in database has been pretreatment)\n if source[t.ror].min() > -99.0:\n pass\n else:\n source[t.ror] = np.where(\n source[t.ror] > -99.0, source[t.ror], -99.0)\n ###\n for account in self.accounts:\n source_account = source[source[t.account] == account]\n source_account = source_account.reset_index(drop=True)\n dateline=source_account[t.effective_date]\n ror=source_account[t.ror]/100\n returns_cum = ROR.ror_cum_ann(source_account, self.annualized)\n # double_return_cum=round(double_return_cum,2)+1\n returns_cum = returns_cum + 1\n growth_amounts = returns_cum * self.starting_value\n returns_cum, growth_amounts = round(returns_cum - 1, 4), \\\n round(growth_amounts, 2)\n l_cols[0].append(growth_amounts.iloc[-1, 0])#account growth amount\n l_cols[1].append(growth_amounts.iloc[-1, 1])#bench growth amount\n l_cols[2].append(returns_cum.iloc[-1, 0])#account return\n l_cols[3].append(returns_cum.iloc[-1, 1])#bench return\n r_lines[account] = [list(returns_cum.iloc[:,0]), list(growth_amounts.iloc[:, 0]),#list(returns_cum.iloc[:, 0])\n list(growth_amounts.iloc[:, 1])]#account return, account growth amount, bench growth amount\n res['account_vs_benchmark'] = {'xAxis': self.accounts,\n 'series': l_cols}\n res['growth_of_unit'] = {'xAxis': list(dateline),\n 'series': r_lines}\n return res\n # ret_dict = self._ret(accounts, starting_value, source, annualized)\n # return ret_dict"
] | [
"0.65272045",
"0.6503945",
"0.6044549",
"0.5962655",
"0.5960727",
"0.5932751",
"0.58754987",
"0.5782237",
"0.5775919",
"0.5757956",
"0.57359225",
"0.57347035",
"0.5726887",
"0.57170856",
"0.57170856",
"0.56863284",
"0.56776404",
"0.56517655",
"0.56427175",
"0.5625753",
"0.56163687",
"0.5585638",
"0.5527937",
"0.55202895",
"0.54722095",
"0.5436671",
"0.543601",
"0.5426373",
"0.54232574",
"0.53982216"
] | 0.8260474 | 0 |
Collect the perprocess tracker for the monitored process(es). | def gather_sample(self):
for _pid in self._select_processes():
if not self.__trackers.get(_pid):
self.__trackers[_pid] = ProcessTracker(_pid, self._logger, self.__id)
self._reset_absolute_metrics()
for _tracker in self.__trackers.values():
_metrics = _tracker.collect()
self.record_metrics(_tracker.pid, _metrics)
self._calculate_aggregated_metrics()
self._remove_dead_processes()
self.print_metrics() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _proc_collect(self) -> None:\n while True:\n self.process_num_threads.set(self._process.num_threads())\n self.process_memory_bytes.set(self._process.memory_info().rss)\n self.process_cpu_percent.set(self._process.cpu_percent())\n\n sleep(self.process_scrape_interval)",
"def collect(self):\n if not self._btime:\n return\n\n try:\n with open(\"/proc/self/stat\", 'rb') as stat:\n parts = (stat.read().split(b')')[-1].split())\n\n self.process_metrics[\"vmem\"].set(\"\", float(parts[20]))\n self.process_metrics[\"rss\"].set(\"\", float(parts[21]) * _PAGESIZE)\n start_time_secs = float(parts[19]) / self._ticks\n self.process_metrics[\"start_time\"].set(\n \"\", start_time_secs + self._btime\n )\n utime = float(parts[11]) / self._ticks\n stime = float(parts[12]) / self._ticks\n self.process_metrics[\"cpu\"].set(\"\", utime + stime)\n except IOError:\n pass\n\n try:\n with open(\"/proc/self/limits\", 'rb') as limits:\n for line in limits:\n if line.startswith(b'Max open file'):\n self.process_metrics[\"max_fds\"].set(\n \"\", float(line.split()[3])\n )\n break\n\n self.process_metrics[\"open_fds\"].set(\n \"\", float(len(os.listdir(\"/proc/self/fd\")))\n )\n except (IOError, OSError):\n pass\n\n # Update gc metrics if enabled.\n if \"collected\" in self.process_metrics:\n for generation, stat in enumerate(gc.get_stats()):\n generation = {\"generation\": str(generation)}\n self.process_metrics[\"collected\"].set(\n generation, stat[\"collected\"]\n )\n self.process_metrics[\"uncollectable\"].set(\n generation, stat[\"uncollectable\"]\n )\n self.process_metrics[\"collections\"].set(\n generation, stat[\"collections\"]\n )",
"def monitoredProcs(self):\n return self._pidToProcess.itervalues()",
"def processStats(self):\n return self._processes.itervalues()",
"def setup_process_stats(pid):\n return psutil.Process(pid)",
"def process():\n interesting_procs = set(INTERESTING_PROCESSES)\n\n pids = psutil.pids()\n info = {\n \"stats_type\": \"process\",\n \"proc\": {\n \"count\": len(pids),\n }\n }\n proc_root = os.environ.get(\"PROC_ROOT\", \"/proc\")\n for pid in pids:\n proc_info = proc.core.Process.from_path(\n os.path.join(proc_root, str(pid)))\n\n proc_name = get_proc_name(proc_info, interesting_procs)\n if not proc_name:\n continue\n\n if 'sshd' in proc_name and ':' in proc_info.cmdline:\n continue\n\n if proc_name not in info['proc']:\n info['proc'][proc_name] = {\n 'running': proc_info.state in ('R', 'S', 'D', 'T', 'W'),\n 'pid': proc_info.pid,\n 'ppid': proc_info.ppid,\n 'user_time': int(proc_info.stat_fields[16]), # cutime\n 'sys_time': int(proc_info.stat_fields[17]), # cstime\n 'vsize': proc_info.vsize,\n 'rss': proc_info.rss,\n 'voluntary_ctxt_switches': int(proc_info.status_fields[\n 'voluntary_ctxt_switches']),\n 'nonvoluntary_ctxt_switches': int(proc_info.status_fields[\n 'nonvoluntary_ctxt_switches']),\n 'age': proc_info.runtime,\n 'count': 1\n }\n else:\n pinfo = info['proc'][proc_name]\n pinfo['count'] += 1\n\n def append(dest, field, value):\n \"\"\"Append values for an existing process.\"\"\"\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]\n\n # append('state', proc_info.state)\n append(pinfo, 'pid', proc_info.pid)\n append(pinfo, 'ppid', proc_info.ppid)\n pinfo['user_time'] += int(proc_info.stat_fields[16]) # cutime\n pinfo['sys_time'] += int(proc_info.stat_fields[17]) # cstime\n pinfo['vsize'] += proc_info.vsize\n pinfo['rss'] += proc_info.rss\n pinfo['voluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['voluntary_ctxt_switches'])\n pinfo['nonvoluntary_ctxt_switches'] = \\\n int(proc_info.status_fields['nonvoluntary_ctxt_switches'])\n append(pinfo, 'age', proc_info.runtime)\n\n return info",
"def _collect_set(self, pidset):",
"def GetPublishedProcesses():\r\n pass",
"def collect(self):\n\n collector = {}\n for gather in self.gathers:\n try:\n stats = gather.run_single_cycle(collector=collector)\n if stats:\n collector.update(stats)\n except Exception as ex:\n self._logger.exception(\n \"Exception while collecting metrics for PID: %s of type: %s. Details: %s\",\n self.pid,\n type(gather),\n repr(ex),\n )\n return collector",
"def add_process(self):\r\n\r\n proc_dict = dict()\r\n total_count = len(self.newest_connections['pid'].unique())\r\n count = 0\r\n for proc in self.newest_connections['pid'].unique():\r\n count += 1\r\n percent = round((count / total_count * 100))\r\n print('{}{}Identifying processes in progress. Accomplished: {}%{}'.format(Colors.GREEN,Colors.BOLD,percent,Colors.END), end='\\r')\r\n output = subprocess.run([\"powershell.exe\", \"-Command\", f'Get-Process -Id {proc} | select-object -Property ProcessName | ft -HideTableHeaders'], capture_output=True, text=True).stdout.strip()\r\n proc_dict[proc] = output\r\n print()\r\n processes = pd.Series(proc_dict)\r\n processes_df = pd.DataFrame(processes.reset_index())\r\n processes_df.columns = ['pid', 'process_name']\r\n if 'process_name' in self.newest_connections:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on=['pid', 'process_name'], how='right')\r\n else:\r\n self.newest_connections = pd.merge(self.newest_connections, processes_df, on='pid', how='right')\r\n return self.newest_connections",
"def get_process_list() -> Dict:\n return {proc.pid: proc.name() for proc in psutil.process_iter()}",
"def monitor(self):\n procdata = self.collect_userprocs_info()\n now = int(time.time())\n #-------------------\n proclist = []\n for name in procdata:\n mem = procdata[name]['rss']\n pcode = self.DB.get_code(name)\n proclist.append((now, pcode, mem))\n self.DB.add_proc_info(proclist)\n #-------------------\n totmem = psutil.virtual_memory()\n self.DB.add_total_mem_info(now, totmem.used, totmem.available, totmem.free)\n #-------------------\n disk = psutil.disk_usage('/')\n dinfo = {\n \"utime\" : now,\n \"total\" : disk.total,\n \"used\" : disk.used,\n \"free\" : disk.free,\n \"percent\" : disk.percent\n }\n self.DB.add_diskuse_info(dinfo)\n #-------------------\n cpu = json.dumps(psutil.cpu_percent(None, True))\n self.DB.add_total_cpu(now, cpu)\n #-------------------\n net = psutil.net_io_counters()\n ninfo = {\n \"utime\" : now,\n \"brecv\" : net.bytes_recv,\n \"bsent\" : net.bytes_sent,\n \"precv\" : net.packets_recv,\n \"psent\" : net.packets_sent,\n \"errin\" : net.errin,\n \"errin\" : net.errout\n }\n self.DB.add_net_info(ninfo)",
"def _start_proc_collector(self) -> None:\n thread = threading.Thread(target=self._proc_collect, name=\"ProcessMetricsCollector\", daemon=True)\n thread.start()",
"def resource_collect(pid=None):\n try:\n import psutil\n except ImportError:\n return {}\n\n p = psutil.Process(pid or os.getpid())\n return {'cpu_percent': psutil.cpu_percent(),\n 'status': p.status(),\n 'memory_percent': p.memory_percent(),\n 'memory_info_ex': p.memory_info_ex(),\n 'disk_io_counters': metrics.disk_io_counters(),\n 'net_io_counters': metrics.net_io_counters()}",
"def identify_processes(self) -> Dict[int, dict]:\n\n processes = {}\n\n for process in self.behavior[\"generic\"]:\n\n proc_name, proc_path = split_path(process[\"process_path\"])\n\n processes[int(process[\"pid\"])] = {\n FieldNames.PROCESS_IMAGE: proc_name,\n FieldNames.PROCESS_IMAGE_PATH: proc_path,\n FieldNames.PROCESS_ID: int(process[\"pid\"]),\n }\n\n return processes",
"def processes(self):\n # MODIFIED 11/1/16 OLD:\n return list(item.process for item in self.process_tuples)\n # # MODIFIED 11/1/16 NEW:\n # return sorted(list(item.process for item in self.process_tuples), key=lambda process: process.name)\n # MODIFIED 11/1/16 END",
"def monitor(self):\n for idx, process in enumerate(self.__process_list):\n process.id_number = idx + 1\n while len(self.__process_list) > 0:\n for process in list(self.__process_list):\n if not process.has_output():\n _return_code = process.return_code\n self.__process_list.remove(process)\n if _return_code == 0:\n logger.info(\"Finished process #{}: there are now {}/{} running\".format(process.id_number, len(self.__process_list), self.__n_initial))\n else:\n logger.warning(\"Process #{} terminated unexpectedly (return code {}): there are now {}/{} running\".format(process.id_number, _return_code, len(self.__process_list), self.__n_initial))",
"def pids(self):\n return self._pidToProcess.iterkeys()",
"def get_processes():\n yield from psutil.process_iter()",
"def monitorAll(self):\n\n websites = self.user.mySites.values()\n\n # subprocesses to get the requests logs\n self.processes = [Process(target=self.monitorOne, args=(website,)) for website in websites]\n\n for process in self.processes:\n process.daemon = True\n\n for process in self.processes:\n process.start()\n\n for process in self.processes:\n process.join()\n\n return",
"def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs",
"def allprocs(self):\n\n processes = self.getHash( 'nameHash' ) # safely get copy of process name dictionary\n\n allprocs = {}\n\n for p in processes.keys():\n allprocs[p] = processes[p].procinfo()\n\n return allprocs",
"def _select_processes(self):\n\n # check if at least one process is running\n is_running = False\n for pid in self.__pids:\n if ProcessMonitor.__is_running(pid):\n is_running = True\n break # at least one process is running\n\n if is_running:\n if not self.__aggregate_multiple_processes:\n return self.__pids\n\n # aggregate metrics, check the last discovered time\n if (\n self.__last_discovered\n and time.time() * 1000 - self.__last_discovered\n < self.__process_discovery_interval * 1000\n ):\n return self.__pids\n\n ps = ProcessList()\n if self.__commandline_matcher:\n self.__last_discovered = time.time() * 1000\n if self.__include_child_processes:\n matched_processes = ps.get_matches_commandline_with_children(\n self.__commandline_matcher\n )\n else:\n matched_processes = ps.get_matches_commandline(\n self.__commandline_matcher\n )\n self.__pids = matched_processes\n\n if not self.__aggregate_multiple_processes and len(self.__pids) > 1:\n # old behaviour where multiple processes were not supported for aggregation\n self._logger.warning(\n \"Multiple processes match the command '%s'. Returning existing pid. \"\n \"You can turn on the multi process aggregation support by adding the \"\n \"aggregate_multiple_processes configuration to true\"\n % self.__commandline_matcher,\n limit_once_per_x_secs=300,\n limit_key=\"linux-process-monitor-existing-pid\",\n )\n self.__pids = [self.__pids[0]]\n else:\n # See if the specified target pid is running. If so, then return it.\n # Special cases:\n # '$$' mean this process.\n # '$$TBD' mean that the PID of the target process has not been determined yet and it will be set later.\n pids = []\n if self.__target_pids:\n for t_pid in self.__target_pids:\n if t_pid == \"$$\":\n t_pid = int(os.getpid())\n\n # skip this until it will be replaced with a real PID.\n elif t_pid == \"$$TBD\":\n continue\n else:\n t_pid = int(t_pid)\n pids.append(t_pid)\n self.__pids = pids\n return self.__pids",
"def get_process_mapping():\n with open('/proc/{0}/stat'.format(os.getpid())) as f:\n self_tty = f.read().split()[STAT_TTY]\n processes = {}\n for pid in os.listdir('/proc'):\n if not pid.isdigit():\n continue\n try:\n stat = '/proc/{0}/stat'.format(pid)\n cmdline = '/proc/{0}/cmdline'.format(pid)\n with open(stat) as fstat, open(cmdline) as fcmdline:\n stat = re.findall(r'\\(.+\\)|\\S+', fstat.read())\n cmd = fcmdline.read().split('\\x00')[:-1]\n ppid = stat[STAT_PPID]\n tty = stat[STAT_TTY]\n if tty == self_tty:\n processes[pid] = Process(\n args=tuple(cmd), pid=pid, ppid=ppid,\n )\n except IOError:\n # Process has disappeared - just ignore it.\n continue\n return processes",
"def pid_processes(self):\n return [(process.namespec(), process.infos[self.address_name]['pid'])\n for process in self.processes.values()\n if process.pid_running_on(self.address_name)]",
"def get_processes_running():\r\n p = [] #array of processes\r\n if platform == \"linux\" or platform == \"linux2\":\r\n for proc in psutil.process_iter():\r\n try:\r\n tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))\r\n p.append(tmp)\r\n except:\r\n continue\r\n return (p)\r\n\t\t\t\r\n tasks = check_output(['tasklist']).decode('cp866', 'ignore').split(\"\\r\\n\")\r\n for task in tasks:\r\n m = re.match(b'(.*?)\\\\s+(\\\\d+)\\\\s+(\\\\w+)\\\\s+(\\\\w+)\\\\s+(.*?)\\\\s.*', task.encode())\r\n if m is not None:\r\n tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))\r\n p.append(tmp)\r\n #m.group(1).decode() image name\r\n #m.group(2).decode() process id\r\n #m.group(3).decode() session_name\r\n #m.group(4).decode() session_num\r\n #m.group(5).decode('ascii', 'ignore') memory usage\r\n return(p)",
"def get_processes_info():\n processes_list = []\n for proc in get_processes():\n try:\n # Fetch process details as dict\n pinfo = proc.as_dict(attrs=[\"pid\", \"name\", \"username\"])\n pinfo[\"rss\"] = proc.memory_info().rss / (1024 * 1024)\n pinfo[\"ports\"] = []\n try:\n connections = proc.connections()\n except psutil.Error:\n continue\n if connections:\n for conn in connections:\n pinfo[\"ports\"].append({\"port\": conn.laddr.port, \"status\": conn.status})\n # Append dict to list\n processes_list.append(pinfo)\n except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):\n pass\n processes_list = sorted(processes_list, key=lambda procObj: procObj[\"rss\"], reverse=True)\n return processes_list[:25]",
"def get_processes(self):\n processes = {}\n # Get ps output\n cmd = [\"ps\", \"-Z\"]\n # Split by newlines and remove first line (\"LABEL USER PID PPID NAME\")\n # TODO: surround with try/except?\n psz = subprocess.check_output(self.shell + cmd).decode().split('\\n')[1:]\n for line in psz:\n line = line.strip(\"\\r\")\n if line:\n try:\n p = Process(line, self.android_version)\n except ValueError as e:\n self.log.warning(e)\n else:\n processes[p.pid] = p\n return processes",
"def __iter__(self):\n seen = self.seen\n if time_now() - self.last_cleanup_time > self.cleanup_seen_interval:\n # Time to cleanup seen set\n to_remove = set()\n for pid in seen:\n # Remove from seen if PID no longer running\n if not P.exists(P.join(PROC_DIR, str(pid))):\n to_remove.add(pid)\n\n seen -= to_remove\n self.last_cleanup_time = time_now()\n\n for file in os.listdir(PROC_DIR):\n try:\n pid = int(file)\n if pid not in seen:\n self._new_pids.append(pid)\n\n except ValueError:\n # Non PID file in /proc\n pass\n\n seen.update(self._new_pids)\n\n return self",
"def reload(self):\n\t\tdel self.processes\n\t\tself.processes = {}\n\t\tpids = os.listdir(self.basedir)\n\t\tfor spid in pids:\n\t\t\ttry:\n\t\t\t\tpid = int(spid)\n\t\t\texcept:\n\t\t\t\tcontinue\n\n\t\t\tself.processes[pid] = process(pid, self.basedir)"
] | [
"0.70074445",
"0.70008403",
"0.65227914",
"0.6477468",
"0.64733964",
"0.6447299",
"0.64127636",
"0.6285023",
"0.6207518",
"0.6167097",
"0.6145146",
"0.61055756",
"0.6044723",
"0.6031808",
"0.5893498",
"0.58866787",
"0.58456194",
"0.5834667",
"0.5807094",
"0.5745234",
"0.5732411",
"0.5732411",
"0.5665456",
"0.5664748",
"0.5649203",
"0.564741",
"0.56443006",
"0.56217676",
"0.56139535",
"0.5609689"
] | 0.7125841 | 0 |
Set the PID of the process that was marked as $$TBD. | def set_pid(self, pid): # type: (int) -> None
for i in range(len(self.__target_pids)):
if self.__target_pids[i] == "$$TBD":
self.__target_pids[i] = pid
break | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def def_pid(self,pid):\n self.pid=int(pid)",
"def pid(self, pid):\n\n self._pid = pid",
"def pid(self, pid):\n\n self._pid = pid",
"def _update_PID(self):\n self.pid = PID(p=self.paramP, i=self.paramI, d=self.paramD, setpoint=self.voltageSetpoint, memory=self.paramMemory)",
"def set_hold():\n hold = request.params.get(\"hold\", 0) == \"true\"\n pid = request.params.get(\"pid\", 1, type=int)\n retval = RP_LIB.rp_PIDSetHold(pid, hold)\n if retval != 0:\n LOG.error(\"Failed to set PID internal state holding. Error code: %s\", ERROR_CODES[retval])",
"def pid():\n return 0x0204",
"def pid():\n return 0x0204",
"def process_id(self, process_id):\n\n self._process_id = process_id",
"def process_id(self, process_id):\n\n self._process_id = process_id",
"def pid(self):",
"def dynamic_pid(self):\n pass",
"def test_missingPIDVariable(self):\n fakeEnvironment = self.initializeEnvironment(3, os.getpid())\n del fakeEnvironment['LISTEN_PID']\n sddaemon = ListenFDs.fromEnvironment(environ=fakeEnvironment)\n self.assertEqual([], sddaemon.inheritedDescriptors())",
"def ppid(self):",
"def cli_set_process_title():\n raise NotImplementedError()",
"def setParentID(self, pid='0'):\n self.PUID = pid\n logger.debug('parentID set to: %s' % self.PID)",
"def setInitialProcessUID(self, puid):\n\n self.p_uid = puid\n return",
"def set_tid(self, tid):\n self.__tid = tid",
"def try_set_process_name(self, name=None):\n if name is None:\n name = getattr(self, 'process_name', None)\n if name is None:\n return\n try:\n import setproctitle\n setproctitle.setproctitle(name)\n except (ImportError, AttributeError):\n pass",
"async def setprob(self, ctx, problem_name=None):\n if problem_name:\n if not await problem_exists(ctx, problem_name):\n return\n current_problem[ctx.author.id] = problem_name\n if problem_name:\n await ctx.send('Problem successfully set.')\n else:\n await ctx.send('The bot will no longer check your submissions.')",
"def free_pid():\n host, pid, tid = get_process_id()\n while True:\n # PIDs are often restricted to a small range. On Linux the range >32k is by default not used.\n pid = random.randint(33000, 65000)\n if not process_alive(host, pid, tid):\n return pid",
"async def set_post_number(self, ctx: commands.Context, post_num: int = 0):\n await ctx.cfg_channel.current_post_num.set(post_num)\n await ctx.send(\"Current auto-post number has been set to {}\".format(post_num))\n await ctx.cfg_channel.last_post_time.set(0)",
"def pid(self):\n\t\treturn self.__pid",
"def __init__(self, pid):\n self.pid = pid\n self.refresh_code_ranges()",
"def _kill(self) -> None:\n if not hasattr(self, \"proc\"):\n raise FuzzFrontendError(\"Attempted to kill non-running PID.\")\n\n self.proc.terminate()\n try:\n self.proc.wait(timeout=0.5)\n L.info(\"Fuzzer subprocess exited with `%d`\", self.proc.returncode)\n except subprocess.TimeoutExpired:\n raise FuzzFrontendError(\"Subprocess could not terminate in time\")\n\n self._on = False",
"def set_mintty_title(title):\n\n is_mintty = False\n see('sys.platform')\n if 'win' in sys.platform.lower():\n try:\n p = subprocess.Popen(['ps'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n except Exception as e:\n parser.error('Error executing `ps`: {e!s}'.format(**locals()))\n (stdout, stderr) = p.communicate()\n stdout = stdout.decode('utf-8')\n stderr = stderr.decode('utf-8')\n rc = p.wait()\n log.info('ps: {rc}, {stdout!r}, {stderr!r}'.format(**locals()))\n lines = stdout.splitlines()\n see('lines')\n if lines:\n headings = lines[0].split()\n see('headings')\n processes = {}\n for line in lines[1:]:\n tokens = line.split()\n see('tokens')\n pid = tokens[0]\n if len(tokens) <= len(headings):\n see('pid')\n processes[pid] = {}\n for (pos, heading) in enumerate(headings):\n processes[pid][heading] = tokens[pos]\n see('processes')\n process = processes.get(str(os.getpid()))\n while True:\n see('process')\n if process:\n if process['COMMAND'] == '/usr/bin/mintty':\n is_mintty = True\n break\n else:\n process = processes.get(process['PPID'])\n else:\n break\n see('is_mintty')\n\n if is_mintty:\n log.info('changing mintty title to: {title!r}'.format(**locals()))\n sys.stdout.write('\\x1b]0;{title}\\x07'.format(**locals()))",
"def pb_id(self, pb_id: str):\n # FIXME(BMo) instead of creating the object to check if the PB exists\n # use a method on PB List?\n # ProcessingBlock(pb_id)\n self.set_state(DevState.ON)\n self._pb_id = pb_id",
"def set_pno(self, pno):\n self.__pno = pno",
"def addSlavePid(self, pid):\n if self._logger is not None:\n self._logger.debug('Adding slave PID ' + str(pid))\n if not pid in self._all_processes_pid: # Make sure we don't add twice a PID\n self._all_processes_pid += [pid] # Add",
"def test_003_pid(self):\n HEADING()\n pid = self.db.pid()\n print (pid)\n assert True",
"def pid(self):\n return self.__pid"
] | [
"0.6045698",
"0.6005458",
"0.6005458",
"0.5414311",
"0.5351061",
"0.5235025",
"0.5235025",
"0.52202576",
"0.52202576",
"0.51928914",
"0.51871926",
"0.50754386",
"0.5040368",
"0.5033574",
"0.49666995",
"0.49178597",
"0.49152836",
"0.49008197",
"0.4899533",
"0.48833144",
"0.4882286",
"0.48132563",
"0.48048356",
"0.47864658",
"0.4761764",
"0.47297344",
"0.47267908",
"0.4726245",
"0.47245863",
"0.4705036"
] | 0.79088074 | 0 |
Get the probability of a word following a context. i.e. The conditional probability P(word|context) | def prob(self, word, context=None):
if not context:
context = ()
else:
context = tuple(context)
prob = 0
for i in range(len(context) + 1):
prob += self.weights[i] * self.ngram_cpd[context[i:]][word]
return prob | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prob(self, word, context):\n\n context = tuple(context)\n \n context_lenth = len(context) \n if context_lenth == 0:\n line = ''\n elif context_lenth == 1:\n line = context[0]\n elif context_lenth >= 2:\n line = context[0]\n for each_word in context[1:]:\n line = line + ' ' + each_word\n line = line + ' ' + word\n \n try:\n #print self.slct % (line)\n self.cursor.execute(self.slct % (line))\n data = self.cursor.fetchall()\n except Exception, e:\n print \"Error happened when access gramc DB: \", e\n return 1\n \n if len(data):\n cnt = data[0][0]\n #result = 0.0\n result = cnt / self.cnt_sum[context_lenth+1]\n #print result\n if result == 0:\n result = 1\n return result\n elif context_lenth == 0:\n return 1\n else:\n return self.prob(word, context[1:])",
"def get_word_probability(self, label, term):\n\n if 'sod' in label:\n return self.cond_prob_sod[term]\n elif 'pop' in label:\n return self.cond_prob_pop[term]\n else:\n print(\"Just run the doctest Dev\")\n \n pass",
"def word_probability(self, word, prev):\n bg = \"{} {}\".format(prev, word)\n p_c = self.model[word] if word in self.model else 1e-10 \n p_cw = self.bigrams[bg] if bg in self.bigrams else 1e-10 \n p = p_c * p_cw if prev else p_c\n return p",
"def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def probability(self, words):\n prob = 1\n for w in words: # Loop through words\n if w not in self.mdl.index: # Not in tokens\n return 0\n else: # Get prob\n prob *= self.mdl.loc[w] \n return prob",
"def word_probability(self, word: str) -> int:\n try:\n return self.fdist[word.lower()] / len(self.fdist.keys())\n except KeyError:\n return 0.0",
"def logprob(self, word, context):\n\n return -log(self.prob(word, context), 2)",
"def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product",
"def cond_prob(self, token, prev_tokens=None):\n\n if not prev_tokens:\n assert self.n == 1\n prev_tokens = tuple()\n # ngram condicional probs are based on relative counts\n hits = self.count((tuple(prev_tokens)+(token,)))\n sub_count = self.count(tuple(prev_tokens))\n\n return hits / float(sub_count)",
"def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result",
"def context_probabilities(self, context):\n if context not in self._cache.keys():\n self._cache[context] = {\n word: self.score(word, context) for word in self.vocab.counts.keys()\n }\n return self._cache[context]",
"def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob",
"def compute_propability(word, label, dict):\n return dict[label][word] / sum(dict[label].values())",
"def calculate_word_probabilities(word):\n\n\tprobabilities = {\"one\":0,\"two\":0,\"three\":0,\"four\":0,\"five\":0}\n\n\tfor star in range(1,6):\n\t\tconditional = float(word[number_to_text[star]])/statements_with_star[star]\n\t\tprobabilities[number_to_text[star]]=conditional*10\n\n\tdb.words.update({\"_id\":ObjectId(word[\"_id\"])},{\"$set\":{\"conditionals\":probabilities}})\n\n\treturn 1",
"def probability(self, words):\n if len(words) == 0:\n return 0\n \n prob = 1\n model = self.mdl\n \n words_ngram = NGramLM(self.N, []).create_ngrams(words) # Create NGram model for words\n for ngram in words_ngram:\n # Never seen before ngram or n-1gram\n if (ngram not in list(model['ngram'])) or (ngram[:-1] not in list(model['n1gram'])):\n return 0\n if isinstance(self, NGramLM):\n prob *= model[model['ngram'] == ngram]['prob'].values[0]\n \n def recur_prob(model, w):\n prob = 1\n prev_mod = model.prev_mdl\n if isinstance(prev_mod, UnigramLM): # Unigram base case\n prob *= prev_mod.mdl[w[0]]\n else:\n words_n1gram = NGramLM(prev_mod.N, []).create_ngrams(w) # Create NGram model for words\n prob *= prev_mod.mdl[prev_mod.mdl['ngram'] == words_n1gram[0]]['prob'].values[0]\n prob *= recur_prob(prev_mod, words_n1gram[0]) # Recursive call\n return prob\n\n prob *= recur_prob(self, words_ngram[0])\n \n return prob",
"def p_word_given_label(vocab, training_data, label):\n\n smooth = 1 # smoothing factor\n word_prob = {}\n # TODO: add your code here\n total_word = 0\n\n word_prob[None] = 0\n\n\n for dic in training_data:\n\n for index0, i0 in enumerate(dic['bow']):\n if (list(dic['bow'])[index0] in word_prob):\n continue;\n word_prob[list(dic['bow'])[index0]] = 0\n #word_prob[None] = 0\n if(dic[\"label\"] == label):\n for index, i in enumerate(dic[\"bow\"]):\n if(list(dic['bow'])[index] in vocab):\n if(list(dic['bow'])[index] in word_prob):\n\n word_prob[list(dic['bow'])[index]] += dic[\"bow\"][i]\n else:\n word_prob[list(dic['bow'])[index]] = dic[\"bow\"][i]\n else:\n if(None in word_prob):\n word_prob[None] += dic[\"bow\"][i]\n else:\n word_prob[None] = 0\n\n total_word += dic[\"bow\"][i]\n #word_prob [None] = 5\n\n for h in word_prob:\n word_prob[h] = math.log((word_prob[h] + smooth*1)) - math.log((total_word + smooth*(len(vocab) +1)))\n\n\n return word_prob",
"def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result",
"def predictability(self):\n temp = self.probs\n for n in range(10):\n temp = temp.dot(temp)\n final = temp[0,:]\n #Let's assume that all words have unique initial letters\n probs = map(len, self.words)\n probs = array(probs)\n probs = (probs + self.probs.max(1)-1)/probs\n return sum(final*probs)",
"def next_word_probability(self, observation, partial_out):\n if not hasattr(self, 'prev_enc'):\n self.prev_enc = None\n self.last_text = None\n if observation['text'] != self.last_text:\n self.prev_enc = None\n self.last_text = observation.get('text')\n self.observe(observation)\n\n obs = self.observation\n obs['eval_labels'] = [' '.join(partial_out)]\n batch = self.vectorize([obs])\n self.model.eval()\n self.model.longest_label = 1 # no need to predict farther ahead\n out = self.model(\n batch[0], # xs\n ys=(batch[1] if len(partial_out) > 0 else None),\n prev_enc=self.prev_enc)\n scores, self.prev_enc = out[1], out[3]\n # scores is bsz x seqlen x num_words, so select probs of current index\n assert len(partial_out) == scores.size(1) - 1\n probs = F.softmax(scores.select(1, len(partial_out)), dim=1).squeeze().cpu()\n dist = self.probs\n for i in range(len(probs)):\n try:\n val = probs[i].item()\n except AttributeError:\n val = probs[i][0]\n dist[self.dict[i]] = val\n self.batch = batch\n return dist",
"def compute_probability_of_state(state):\n p = compute_log_probability_of_text(state[\"text\"], state[\"char_to_ix\"], \n state[\"frequency_statistics\"], state[\"transition_matrix\"])\n \n return p",
"def cond_prob(self, token, prev_tokens=None):\n if not prev_tokens:\n assert self.n == 1\n prev_tokens = tuple()\n\n hits = self.count((tuple(prev_tokens)+(token,)))\n sub_count = self.count(tuple(prev_tokens))\n # heuristic\n return (hits+1) / (float(sub_count)+self.V())",
"def calc_p(self, context, seq):\n num_zeros, num_ones = _count_followers(context, seq)\n if num_zeros == 0 and num_ones == 0:\n return 1.0\n\n p0context = self.calc_p(\"0\" + context, seq)\n p1context = self.calc_p(\"1\" + context, seq)\n p_uncovered = 1.0\n if seq.startswith(context):\n # A bit will be uncovered by the child models,\n # if the 0context and 1context don't fit before it in the sequence.\n # The \"Extending the Context-Tree Weighting Method\" paper names\n # the p_uncovered as P^{epsilon s}.\n assert self.estimator(1, 0) == self.estimator(0, 1)\n p_uncovered = 0.5\n\n # The CTW estimate is the average\n # of the this context model and the model of its children.\n # The recursive averaging prefers simpler models.\n result = 0.5 * (\n self.estimator(num_zeros, num_ones) +\n p0context * p1context * p_uncovered)\n return result",
"def get_lexical_generation_prob(self, word, label):\n word = word.lower()\n numer = self.SMOOTHING_VALUE\n if word in self.words_labels_counts[label] and self.words_labels_counts[label][word] != 0:\n numer += self.words_labels_counts[label][word]\n elif word in self.words_labels_counts[label]:\n numer += self.words_labels_counts[label][self.UNKNOWN_TOKEN]\n denom = self.label_counts[label] + self.SMOOTHING_VALUE * self.all_grams.get_count()\n return float(numer) / denom",
"def cond_prob(self, token, prev_tokens=()):\n assert len(prev_tokens) < self._n\n if self.count(prev_tokens) == 0:\n return 0.0\n return float(self.count(list(prev_tokens) + [token])) / float(self.count(prev_tokens))",
"def prob(self, w):\n return self.counts[w] / self.total_count",
"def cond_prob(self, token, prev_tokens=()):\n return float(self.count(list(prev_tokens) + [token]) + 1) / float(self.count(prev_tokens) + self._V)",
"def next_word_proba(self, word, seq):\n context = tuple(seq[-2:]) # last two words\n return self.probas[context].get(word, 0.0)",
"def estimate_prob(self, history, word):\n\t\t# YOUR CODE HERE\n\n\t\tif history == '':\n\t\t\t# unigram\n\t\t\tword_frequency = self.ngram_counts[tuple([word])]\n\t\t\treturn word_frequency/self.total_counts\n\n\t\telse:\n\t\t\t# bigram\n\t\t\tword_frequency = self.ngram_counts[tuple([history, word])]\n\t\t\t# history_count = sum([self.ngram_counts[key] for key in self.ngram_counts if key[0] == history])\n\t\t\t# history_count = self.history_count[history]\n\t\t\thistory_count = self.ngram_counts[tuple([history])]\n\t\t\t# print('his: {}',format(history))\n\t\t\t# print('his count {}'.format(history_count))\n\t\t\treturn word_frequency/history_count",
"def eval_ppl(model, context, resp_gt, vocab):\n loss = 0\n num_tokens = 0\n num_unk = 0\n for i in range(len(resp_gt)):\n if resp_gt[i] in vocab:\n probs, eos_probs = model.next_word_probability(context, resp_gt[:i])\n prob_true = probs.get(resp_gt[i], 0)\n if prob_true > 0:\n prob_true /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)\n loss -= math.log(prob_true)\n else:\n loss = float('inf')\n num_tokens += 1\n else:\n num_unk += 1\n probs, eos_probs = model.next_word_probability(context, resp_gt)\n eos_probs /= (sum((probs.get(k, 0) for k in vocab)) + eos_probs)\n loss -= math.log(eos_probs)\n num_tokens += 1\n return loss / num_tokens, math.exp(loss / num_tokens)"
] | [
"0.8106743",
"0.75592023",
"0.74971807",
"0.7463757",
"0.7168955",
"0.7168955",
"0.7081482",
"0.707541",
"0.69993883",
"0.69632804",
"0.6952478",
"0.6929444",
"0.6909919",
"0.67921746",
"0.6687317",
"0.6680557",
"0.66741383",
"0.66102266",
"0.6583568",
"0.6526557",
"0.64608645",
"0.64588284",
"0.6452997",
"0.64493877",
"0.64414537",
"0.6421439",
"0.64033943",
"0.6386405",
"0.6361484",
"0.6350575"
] | 0.8385041 | 0 |
YOLOV3 network hybrid forward. | def hybrid_forward(self, F, x, *args):
all_box_centers = []
all_box_scales = []
all_objectness = []
all_class_pred = []
all_anchors = []
all_offsets = []
all_feat_maps = []
all_detections = []
routes = []
for stage, block, output in zip(self.stages, self.yolo_blocks, self.yolo_outputs):
x = stage(x)
routes.append(x)
# the YOLO output layers are used in reverse order, i.e., from very deep layers to shallow
for i, block, output in zip(range(len(routes)), self.yolo_blocks, self.yolo_outputs):
x, tip = block(x)
if autograd.is_training():
dets, box_centers, box_scales, objness, class_pred, anchors, offsets = output(tip)
all_box_centers.append(box_centers.reshape((0, -3, -1)))
all_box_scales.append(box_scales.reshape((0, -3, -1)))
all_objectness.append(objness.reshape((0, -3, -1)))
all_class_pred.append(class_pred.reshape((0, -3, -1)))
all_anchors.append(anchors)
all_offsets.append(offsets)
# here we use fake featmap to reduce memory consuption, only shape[2, 3] is used
fake_featmap = F.zeros_like(tip.slice_axis(
axis=0, begin=0, end=1).slice_axis(axis=1, begin=0, end=1))
all_feat_maps.append(fake_featmap)
else:
dets = output(tip)
all_detections.append(dets)
if i >= len(routes) - 1:
break
# add transition layers
x = self.transitions[i](x)
# upsample feature map reverse to shallow layers
upsample = _upsample(x, stride=2)
route_now = routes[::-1][i + 1]
x = F.concat(F.slice_like(upsample, route_now * 0, axes=(2, 3)), route_now, dim=1)
if autograd.is_training():
# during training, the network behaves differently since we don't need detection results
if autograd.is_recording():
# generate losses and return them directly
box_preds = F.concat(*all_detections, dim=1)
all_preds = [F.concat(*p, dim=1) for p in [
all_objectness, all_box_centers, all_box_scales, all_class_pred]]
all_targets = self._target_generator(box_preds, *args)
return self._loss(*(all_preds + all_targets))
# return raw predictions, this is only used in DataLoader transform function.
return (F.concat(*all_detections, dim=1), all_anchors, all_offsets, all_feat_maps,
F.concat(*all_box_centers, dim=1), F.concat(*all_box_scales, dim=1),
F.concat(*all_objectness, dim=1), F.concat(*all_class_pred, dim=1))
# concat all detection results from different stages
result = F.concat(*all_detections, dim=1)
# apply nms per class
if self.nms_thresh > 0 and self.nms_thresh < 1:
result = F.contrib.box_nms(
result, overlap_thresh=self.nms_thresh, valid_thresh=0.01,
topk=self.nms_topk, id_index=0, score_index=1, coord_start=2, force_suppress=False)
if self.post_nms > 0:
result = result.slice_axis(axis=1, begin=0, end=self.post_nms)
ids = result.slice_axis(axis=-1, begin=0, end=1)
scores = result.slice_axis(axis=-1, begin=1, end=2)
bboxes = result.slice_axis(axis=-1, begin=2, end=None)
return ids, scores, bboxes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z",
"def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n x = self.pool1(F.relu(self.batch1(self.conv1(x))))\n x = self.pool2(F.relu(self.batch2(self.conv2(x))))\n x = F.relu(self.batch3a(self.conv3a(x)))\n x = self.pool3(F.relu(self.batch3b(self.conv3b(x))))\n x = F.relu(self.batch4a(self.conv4a(x)))\n x = self.pool4(F.relu(self.batch4b(self.conv4b(x))))\n x = F.relu(self.batch5a(self.conv5a(x)))\n x = self.pool5(F.relu(self.batch5b(self.conv5b(x))))\n x = self.avgpool(x)\n x = x.reshape(x.shape[0], -1)\n out = self.fc1(x)\n\n# raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def forward(self, X, batch_size):\n\n z = self.neural_net_forward(X.view(-1, self.n_hos * self.n_types)) # [batch_size, n_structures]\n\n x_1 = self.linear_program_forward(X, z, batch_size)\n\n return x_1",
"def forward(self, x): \n pal1_sources = list()\n pal2_sources = list()\n loc_pal1 = list()\n conf_pal1 = list()\n loc_pal2 = list()\n conf_pal2 = list()\n\n # apply vgg up to conv3_3 relu\n for k in range(16):\n x = self.vgg[k](x)\n\n of1 = x\n s = self.L2Normof1(of1)\n pal1_sources.append(s)\n \n # apply vgg up to conv4_3 relu\n for k in range(16, 23):\n x = self.vgg[k](x)\n\n of2 = x\n s = self.L2Normof2(of2)\n pal1_sources.append(s)\n\n # apply vgg up to conv5_3 relu\n for k in range(23, 30):\n x = self.vgg[k](x)\n of3 = x\n s = self.L2Normof3(of3)\n pal1_sources.append(s)\n\n # apply vgg up to fc7\n for k in range(30, len(self.vgg)):\n x = self.vgg[k](x)\n of4 = x\n pal1_sources.append(of4)\n \n # apply extra layers and cache source layer outputs\n for k in range(2):\n x = F.relu(self.extras[k](x), inplace=True)\n of5 = x\n pal1_sources.append(of5)\n for k in range(2, 4):\n x = F.relu(self.extras[k](x), inplace=True)\n of6 = x\n pal1_sources.append(of6)\n\n ## fpn module\n \"\"\"\n lfpn6 = self.fpn_topdown6(of6)\n lfpn5 = self._upsample_product(self.fpn_topdown5(of6), self.fpn_latlayer5(of5))\n lfpn4 = self._upsample_product(self.fpn_topdown4(of5), self.fpn_latlayer4(of4))\n lfpn3 = self._upsample_product(self.fpn_topdown3(of4), self.fpn_latlayer3(of3))\n lfpn2 = self._upsample_product(self.fpn_topdown2(of3), self.fpn_latlayer2(of2))\n lfpn1 = self._upsample_product(self.fpn_topdown1(of2), self.fpn_latlayer1(of1))\n\n\n ef1 = self.fpn_fem3_3(lfpn1)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem4_3(lfpn2)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem5_3(lfpn3)\n ef3 = self.L2Normef3(ef3)\n\n ef4 = self.fpn_fem7(lfpn4)\n ef5 = self.fpn_fem6_2(lfpn5)\n ef6 = self.fpn_fem7_2(lfpn6)\n \"\"\"\n\n conv7 = F.relu(self.fpn_topdown[0](of6), inplace=True)\n x = F.relu(self.fpn_topdown[1](conv7), inplace=True)\n conv6 = F.relu(self._upsample_product(x, self.fpn_latlayer[0](of5)), inplace=True)\n\n x = F.relu(self.fpn_topdown[2](conv6), inplace=True)\n convfc7_2 = F.relu(self._upsample_product(x, self.fpn_latlayer[1](of4)), inplace=True)\n\n x = F.relu(self.fpn_topdown[3](convfc7_2), inplace=True)\n conv5 = F.relu(self._upsample_product(x, self.fpn_latlayer[2](of3)), inplace=True)\n\n x = F.relu(self.fpn_topdown[4](conv5), inplace=True)\n conv4 = F.relu(self._upsample_product(x, self.fpn_latlayer[3](of2)), inplace=True)\n\n x = F.relu(self.fpn_topdown[5](conv4), inplace=True)\n conv3 = F.relu(self._upsample_product(x, self.fpn_latlayer[4](of1)), inplace=True)\n\n ef1 = self.fpn_fem[0](conv3)\n ef1 = self.L2Normef1(ef1)\n ef2 = self.fpn_fem[1](conv4)\n ef2 = self.L2Normef2(ef2)\n ef3 = self.fpn_fem[2](conv5)\n ef3 = self.L2Normef3(ef3)\n ef4 = self.fpn_fem[3](convfc7_2)\n ef5 = self.fpn_fem[4](conv6)\n ef6 = self.fpn_fem[5](conv7)\n\n pal2_sources = (ef1, ef2, ef3, ef4, ef5, ef6)\n\n ## first shot \n for (x, l, c) in zip(pal1_sources, self.loc_pal1, self.conf_pal1):\n loc_pal1.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal1.append(c(x).permute(0, 2, 3, 1).contiguous())\n \n ## second shot\n for (x, l, c) in zip(pal2_sources, self.loc_pal2, self.conf_pal2):\n loc_pal2.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf_pal2.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n # first shot\n loc_pal1 = torch.cat([o.view(o.size(0), -1) for o in loc_pal1], 1)\n conf_pal1 = torch.cat([o.view(o.size(0), -1) for o in conf_pal1], 1)\n \n # second shot\n loc_pal2 = torch.cat([o.view(o.size(0), -1) for o in loc_pal2], 1)\n conf_pal2 = torch.cat([o.view(o.size(0), -1) for o in conf_pal2], 1)\n\n 
if self.phase == 'test':\n # at test time, only the output of shot2 (pal2) is used\n output = self.detect(\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n self.softmax(conf_pal2.view(conf_pal2.size(0), -1,\n self.num_classes)), # conf preds\n )\n else:\n ## at training time, the outputs of both shot1 and shot2 are used\n output = (\n loc_pal1.view(loc_pal1.size(0), -1, 4),\n conf_pal1.view(conf_pal1.size(0), -1, self.num_classes),\n loc_pal2.view(loc_pal2.size(0), -1, 4),\n conf_pal2.view(conf_pal2.size(0), -1, self.num_classes))\n return output",
"def L_model_forward(X, parameters):\n pass",
"def forward(network: dict, x: np.array) -> np.array:\n W1, W2, W3 = network['W1'], network['W2'], network['W3']\n b1, b2, b3 = network['b1'], network['b2'], network['b3']\n z1 = _forward(x, W1, b1, 'sigmoid')\n z2 = _forward(z1, W2, b2, 'sigmoid')\n y = _forward(z2, W3, b3, 'identity')\n return y",
"def hybrid_forward(self, F, x):\n identity = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n out = self.conv3(out)\n out = self.bn3(out)\n if self.downsample is not None:\n identity = self.downsample(x)\n out = F.Activation(out + identity, act_type='relu')\n\n if self.nonlocal_block is not None:\n out = self.nonlocal_block(out)\n return out",
"def forward(self, x):\n return self.net(x)",
"def forward(self, x):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n for l in range(len(self.layers)):\n if l == 0:\n z = self.layers[l].forward(x)\n else:\n z = self.layers[l].forward(a)\n a = self.activations[l].forward(z)\n\n # output from softmax layer\n out = a\n\n ########################\n # END OF YOUR CODE #\n #######################\n\n return out",
"def _forward(self, X):\n firstLayer = True\n for layer, fcn in self.model.named_children():\n if 'recurrent' in layer:\n if firstLayer:\n Y, hidden = fcn(X)\n else:\n Y, hidden = fcn(Y)\n elif 'dropout' in layer:\n Y = fcn(Y)\n elif 'linear' in layer:\n Y = fcn(Y.view((Y.shape[1], Y.shape[0]*Y.shape[-1])))\n else:\n Y = fcn(Y)\n\n firstLayer = False\n\n return Y",
"def _forward(self, x):\n global global_epoch\n global_epoch += 1\n bias = -np.ones((x.shape[0], 1))\n tail = np.zeros((x.shape[0], self.dim_hid+self.dim_out))\n nodes = np.concatenate((bias, x, tail), axis=1)\n weight = self.weight * self.connectivity\n for i in range(self.dim_in, self.dim_in+self.dim_hid+self.dim_out):\n net = nodes.dot(weight[i])\n nodes[:,i] = self.__sigmoid(net)\n nodes[:,self.dim_in:self.dim_in+self.dim_hid] *= self.hidden\n return nodes",
"def forward(self, x):\n assert not torch.isnan(x).any(), f\"NaN in input {x}\"\n x = F.relu(self.l1(x))\n x = F.relu(self.l2(x))\n x = self.l3(x)\n return x",
"def forward_pass(X,architecture):\n \n architecture['layer1'][0] = X\n kernel_shape1 = architecture['layer1'][7]\n stride1 = architecture['layer1'][8]\n if kernel_shape1 is not None and not isinstance(kernel_shape1,int):\n X_input_1_im2col,imX = im2col(X,kernel_shape1,stride1,im_needed = False, shape_specified = True)\n architecture['layer1'][4] = X_input_1_im2col\n else:\n architecture['layer1'][4] = None\n\n for layer in range(len(architecture)): # Feedforward from the first till the second last layer\n X_input,X_output,weightsi,biasi,X_input_1_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imx = architecture['layer{}'.format(layer+1)]\n\n if operationi == 'conv_bn_relu':\n conv_output = relu(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_relu':\n conv_output = relu(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_bn_sigmoid':\n conv_output = sigmoid(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if 
operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_sigmoid':\n conv_output = sigmoid(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'maxpool':\n maxpool_output = maxpool(X_input,kernel_shapei,stridei)\n\n maxpool_output = torch.reshape(maxpool_output,output_shapei)\n\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = maxpool_output\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n architecture['layer{}'.format(layer+2)][4],imX = im2col(maxpool_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'flatten_dense_relu':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'relu',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_none':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'none',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_sigmoid':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'sigmoid',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'softmax':\n Xin = architecture['layer{}'.format(layer+1)][0]\n output = softmax(Xin).squeeze()\n architecture['layer{}'.format(layer+1)][1] = 
output\n if layer == len(architecture) - 1:\n y_pred = architecture['layer{}'.format(len(architecture))][1]\n \n return y_pred",
"def forward(self, x):\r\n # Is it possible to calculate the acc and gyro convolution in parallel??? TO DO\r\n # split x\r\n x_split = torch.split(x, 8, dim=3)\r\n # acc\r\n acc_out = F.relu(self.acc_bn1(self.acc_conv1(x_split[0])))\r\n acc_out = F.relu(self.acc_bn2(self.acc_conv2(acc_out)))\r\n # gyro\r\n gyro_out = F.relu(self.gyro_bn1(self.gyro_conv1(x_split[1])))\r\n gyro_out = F.relu(self.gyro_bn2(self.gyro_conv2(gyro_out)))\r\n\r\n sensor_data = torch.cat([acc_out, gyro_out], 3)\r\n out = F.relu(self.bn3(self.conv3(sensor_data)))\r\n return out",
"def forward(self, inputs):\n\n down0 = self.layer_0(inputs=inputs)\n down1 = self.layer_1(inputs=down0)\n down2 = self.layer_2(inputs=down1)\n down3 = self.layer_3(inputs=down2)\n down4 = self.layer_4(inputs=down3)\n\n up1 = self.layer_7(down4, down3)\n\n up2 = self.layer_8(up1, down2)\n\n up3 = self.layer_9(up2, down1)\n\n up4 = self.layer_10(up3, down0)\n\n up5 = self.layer_11(up4)\n return up5",
"def forward(self, x, y=None):\n # propagate networks\n self.prior_latent_distribution = self.prior(x)\n self.unet_features = self.unet.forward(x)\n if y is not None:\n y_onehot = F.one_hot(\n y[:, 0], num_classes=self.num_classes).permute(0, -1, 1, 2)\n xy = torch.cat([x, y_onehot], dim=1)\n self.posterior_latent_distribution = self.posterior(xy)\n\n # sample latent\n if y is not None:\n self.z = self.posterior_latent_distribution.rsample()\n else:\n self.z = self.prior_latent_distribution.sample()\n\n # reconstruct image\n self.y_hat_raw = self.fcomb(self.unet_features, self.z)\n\n return self.y_hat_raw",
"def forward(self):\n self.img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n for k in range(23):\n x = self.vgg[k](x)\n\n s = self.L2Norm(x)\n sources.append(s)\n\n # apply vgg up to fc7\n for k in range(23, len(self.vgg)):\n x = self.vgg[k](x)\n sources.append(x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(conf.size(0), -1,\n self.num_classes)), # conf preds\n self.priors.type(type(x.data)) # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def forward(self, x):\n sources = list()\n new_sources = list()\n\n # apply lds to the initial image\n x_pool = self.lds(x)\n\n # apply vgg up to conv4_3\n for k in range(22):\n x = self.features[k](x)\n conv4_3_bn = self.ibn1(x)\n x_pool1_skip, x_pool1_icn = self.icn1(x_pool)\n s = self.Norm1(conv4_3_bn * x_pool1_icn)\n\n # apply vgg up to fc7\n for k in range(22, 34):\n x = self.features[k](x)\n conv7_bn = self.ibn2(x)\n x_pool2_skip, x_pool2_icn = self.icn2(x_pool1_skip)\n p = self.Norm2(self.dsc1(s) + conv7_bn * x_pool2_icn)\n\n x = self.features[34](x)\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extra):\n x = v(x)\n if k == 0:\n x_pool3_skip, x_pool3_icn = self.icn3(x_pool2_skip)\n w = self.Norm3(self.dsc2(p) + x * x_pool3_icn)\n elif k == 2:\n x_pool4_skip, x_pool4_icn = self.icn4(x_pool3_skip)\n q = self.Norm4(self.dsc3(w) + x * x_pool4_icn)\n elif k == 4:\n o = self.Norm5(self.dsc4(q) + x)\n sources.append(o)\n elif k == 7 or k == 9:\n sources.append(x)\n else:\n pass\n\n # project the forward features into lower dimension.\n tmp1 = self.proj1(p)\n tmp2 = self.proj2(w)\n tmp3 = self.proj3(q)\n tmp4 = self.proj4(o)\n\n # The conv4_3 level\n proj1 = F.upsample(tmp1, scale_factor=2, mode='bilinear')\n proj2 = F.upsample(tmp2, scale_factor=4, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=8, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=16, mode='bilinear')\n proj = torch.cat([proj1, proj2, proj3, proj4], dim=1)\n\n agent1 = self.agent1(s)\n\n convert1 = self.convert1(proj)\n pred1 = torch.cat([agent1, convert1], dim=1)\n pred1 = self.merge1(pred1)\n new_sources.append(pred1)\n\n # The fc_7 level\n proj2 = F.upsample(tmp2, scale_factor=2, mode='bilinear')\n proj3 = F.upsample(tmp3, scale_factor=4, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=8, mode='bilinear')\n proj = torch.cat([proj2, proj3, proj4], dim=1)\n\n agent2 = self.agent2(p)\n convert2 = self.convert2(proj)\n pred2 = torch.cat([agent2, convert2], dim=1)\n pred2 = self.merge2(pred2)\n new_sources.append(pred2)\n\n # The conv8 level\n proj3 = F.upsample(tmp3, scale_factor=2, mode='bilinear')\n proj4 = F.upsample(tmp4, scale_factor=4, mode='bilinear')\n proj = torch.cat([proj3, proj4], dim=1)\n\n agent3 = self.agent3(w)\n convert3 = self.convert3(proj)\n pred3 = torch.cat([agent3, convert3], dim=1)\n pred3 = self.merge3(pred3)\n new_sources.append(pred3)\n\n # The conv9 level\n proj4 = F.upsample(tmp4, scale_factor=2, mode='bilinear')\n proj = proj4\n\n agent4 = self.agent4(q)\n convert4 = self.convert4(proj)\n pred4 = torch.cat([agent4, convert4], dim=1)\n pred4 = self.merge4(pred4)\n new_sources.append(pred4)\n\n for prediction in sources:\n new_sources.append(prediction)\n\n return new_sources",
"def forward(self, x):\n\n\t\t## Conv layers\n\t\tx = self.avgpool(F.tanh(self.conv1(x)))\n\t\tx = self.avgpool(F.tanh(self.conv2(x)))\n\t\tx = F.tanh(self.conv3(x))\n\n\t\t## Flatten\n\t\tx = x.view(x.size(0), -1)\n\n\t\t## Fully connected layers\n\t\tx = F.tanh(self.fc1(x))\n\t\tx = self.fc2(x)\n\n\t\tx = F.softmax(x, dim=1)\n\n\t\treturn x",
"def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output",
"def forward(self, x): \n # Layer 1\n x = F.elu(self.conv1(x)) # bsize x l1_channels x 1 x Nsamples\n x = self.batchnorm1(x)\n x = F.dropout(x, 0.25)\n x = x.permute(0, 2, 1, 3) # bsize x 1 x l1_channels x Nsamples\n\n # Layer 2\n x = self.padding1(x)\n x = F.elu(self.conv2(x)) # bsize x l2_channels x l1_channels x Nsamples\n x = self.batchnorm2(x) \n x = F.dropout(x, 0.25)\n x = self.pooling2(x) # bsize x l2_channels x floor(l1_channels/2) x floor(Nsamples/4)\n\n # Layer 3\n x = self.padding2(x)\n x = F.elu(self.conv3(x)) # bsize x l3_channels x floor(l1_channels/2) x floor(Nsamples/4)\n x = self.batchnorm3(x)\n x = F.dropout(x, 0.25)\n x = self.pooling3(x) # bsize x l3_channels x floor(l1_channels/4) x floor(Nsamples/16)\n\n # Fully-connected Layer\n x = x.view(-1, self.fc1.in_features) # bsize x (l3_channels*floor(l1_channels/4)*floor(Nsamples/16))\n x = F.sigmoid(self.fc1(x)) # bisze x self.fc1.out_features \n \n if self.fc1.out_features == 1:\n x = x.view(-1) # bsize (1D if 1 output unit)\n \n return x",
"def go_forward(net):\n global w, back_loss, loss, l2_loss\n start_forward_time = time.time()\n\n # feed in data\n P = net(w).t()\n\n # calculate loss\n Y = P.mv(X)\n Ybar = Y.mean()\n back_loss = (Y - Ybar).norm(1) / (J)\n loss = back_loss / Ybar\n l2_loss = ((Y - Ybar).norm(2) ** 2) / (J * Ybar)\n\n return time.time() - start_forward_time",
"def forward(self, x):\n\n def run0(x, dummy):\n lout1 = self.lconv1(x)\n out1 = self.conv1(lout1)\n lout2 = self.lconv2(out1 + lout1)\n out2 = self.conv2(lout2)\n lout3 = self.lconv3(out2 + lout2)\n out3 = self.conv3(lout3)\n lout4 = self.lconv4(out3 + lout3)\n out4 = self.conv4(lout4)\n lout5 = self.lconv5(out4 + lout4)\n out5 = self.conv5(lout5)\n lout6 = self.lconv6(out5 + lout5)\n out6 = self.conv6(lout6)\n lout7 = self.lconv7(out6 + lout6)\n out7 = self.conv7(lout7)\n mat = out7[:, :, :, None] + out7[:, :, None, :]\n cur = mat\n if self.num_1d:\n output1d = self.final_1d(out7)\n return cur, output1d\n else:\n return cur\n\n dummy = torch.Tensor(1)\n dummy.requires_grad = True\n if self.num_1d:\n cur, output1d = checkpoint(run0, x, dummy)\n else:\n cur = checkpoint(run0, x, dummy)\n\n def run1(cur):\n first = True\n for lm, m in zip(self.lconvtwos[:7], self.convtwos[:7]):\n if first:\n cur = lm(cur)\n\n first = False\n else:\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run2(cur):\n for lm, m in zip(self.lconvtwos[7:13], self.convtwos[7:13]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n return cur\n\n def run3(cur):\n for lm, m in zip(self.lconvtwos[13:], self.convtwos[13:]):\n cur = lm(cur) + cur\n cur = m(cur) + cur\n\n cur = self.final(cur)\n cur = 0.5 * cur + 0.5 * cur.transpose(2, 3)\n return cur\n\n cur = checkpoint(run1, cur)\n cur = checkpoint(run2, cur)\n cur = checkpoint(run3, cur)\n\n if self.num_1d:\n return cur, output1d\n else:\n return cur",
"def forward(self, x):\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.maxpool(out)\n out = self.avgpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = self.avgpool(out)\n out = torch.flatten(out, 1)\n out = self.fc(out)\n return out",
"def forward_propagate(self, X=[]):\n A = np.zeros((len(self.architecture)), dtype=object)\n if np.size(X) > 0:\n A[0] = X\n else:\n A[0] = self.input\n\n self.all_data[f'A0'] = A[0]\n \n for layer, activation_function in zip(range(1, len(self.architecture)),self.activations):\n Z = (A[layer-1].dot(self.weights_and_biases[f'W{layer}']) + self.weights_and_biases[f'b{layer}'])\n activation_function = self.activations[layer-1]\n A[layer] = self.activation(Z,type=activation_function)\n self.all_data[f'Z{layer}'] = Z\n self.all_data[f'A{layer}'] = A[layer]\n \n y_predicted = A[layer]\n \n return y_predicted",
"def forward(self, data):\n b, _, _ = data.size()\n # encode process\n skip1 = data\n out1 = self.conv1(data)\n\n out1 = self.relu(out1)\n out1 = self.pool1(out1)\n out1 = self.dropout(out1)\n skip2 = out1\n out1 = self.conv2(out1)\n out1 = self.relu(out1)\n out1 = self.pool2(out1)\n out1 = self.dropout(out1)\n skip3 = out1\n out1 = self.conv3(out1)\n out1 = self.relu(out1)\n out1 = self.pool3(out1)\n out1 = self.dropout(out1)\n skip4 = out1\n up5 = self.aap(out1)\n # decode process\n up4 = upsample(up5, skip4.size()[-1])\n up4 = up4 + skip4\n up4 = self.blend4(up4)\n up3 = upsample(up4, skip3.size()[-1])\n up3 = up3 + skip3\n up3 = self.blend3(up3)\n up2 = upsample(up3, skip2.size()[-1])\n up2 = up2 + skip2\n up2 = self.blend2(up2)\n up1 = upsample(up2, skip1.size()[-1])\n up1 = up1 + skip1\n up1 = self.blend1(up1)\n out_dense = self.sigmoid(up1)\n out_dense = out_dense.view(b, -1)\n\n return out_dense",
"def forward(self, x):\n x1, x2 = x\n y1 = self.conv_net.forward(x1)\n y2 = self.sparse_net.forward(x2)\n return y1, y2",
"def forward(self, x):\r\n x = self.conv1(x)\r\n x = self.conv1_BN(x)\r\n x = F.relu(x)\r\n x = self.conv1_dp(x)\r\n x = self.Block2_1(x)\r\n x = self.Block2_2(x)\r\n x = self.Block3_1(x)\r\n x = self.Block3_2(x)\r\n x = self.Block3_3(x)\r\n x = self.Block3_4(x)\r\n x = self.Block4_1(x)\r\n x = self.Block4_2(x)\r\n x = self.Block4_3(x)\r\n x = self.Block4_4(x)\r\n x = self.Block5_1(x)\r\n x = self.Block5_2(x)\r\n x = self.MP(x)\r\n x = x.view(x.size(0),-1)\r\n x = self.fc(x)\r\n \r\n return x"
] | [
"0.65314513",
"0.6292418",
"0.6263829",
"0.6261159",
"0.62541807",
"0.6251553",
"0.6219152",
"0.62162906",
"0.61927277",
"0.6178997",
"0.6141839",
"0.6136382",
"0.6127676",
"0.6119838",
"0.6111306",
"0.61022437",
"0.6099669",
"0.60930973",
"0.60845137",
"0.60679704",
"0.60615677",
"0.6059268",
"0.6059033",
"0.6057288",
"0.60497737",
"0.6039589",
"0.60393703",
"0.6035947",
"0.6032868",
"0.60318774"
] | 0.7524717 | 0 |
Set non-maximum suppression parameters. | def set_nms(self, nms_thresh=0.45, nms_topk=400, post_nms=100):
self._clear_cached_op()
self.nms_thresh = nms_thresh
self.nms_topk = nms_topk
self.post_nms = post_nms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_suppress_flow(self):\n self.suppressed = self.packet_count\n self.fcip.update_one({'hash': self.fcip_hash},\n {'$set': {'suppressed': self.suppressed},})",
"def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n if probs[i] == 0:\n continue\n for j in range(i+1, len(boxes)):\n if classes_num[i] == classes_num[j] and iou(box, boxes[j]) > thr:\n probs[j] = 0.0\n\n return probs",
"def set_cycle_suppression(self):\n self._cyclesuppression = True\n self.suppression_used = False",
"def non_heap_max(self, non_heap_max):\n\n self._non_heap_max = non_heap_max",
"def NoMore():\n\n if assem.MoreParameters():\n errors.DoWarning('extrign', False)",
"def NoMore():\n\n if assem.MoreParameters():\n errors.DoWarning('extrign', False)",
"def box_non_maximum_suppression(data=None, overlap_thresh=_Null, topk=_Null, coord_start=_Null, score_index=_Null, id_index=_Null, force_suppress=_Null, in_format=_Null, out_format=_Null, out=None, name=None, **kwargs):\n return (0,)",
"def SetLimit(self, *args):\n return _BRepAlgo.BRepAlgo_NormalProjection_SetLimit(self, *args)",
"def setPTLimits(*args):\n args[0].Limit.PTLimit.pt_limit = args[1]",
"def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n # dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper",
"def suppress_pd(pars):\n pars = pars.copy()\n for p in pars:\n if p.endswith(\"_pd_n\"): pars[p] = 0\n return pars",
"def _build_non_max_suppressor(type):\n\n if type == model_config.SSD:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n elif type == model_config.FASTER_RCNN:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n else:\n raise ValueError('type must be ssd or faster_rcnn string')\n\n if iou_threshold < 0 or iou_threshold > 1.0:\n raise ValueError('iou_threshold not in [0, 1.0].')\n if max_detections_per_class > max_total_detections:\n raise ValueError('max_detections_per_class should be no greater than '\n 'max_total_detections.')\n\n non_max_suppressor_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=score_threshold,\n iou_thresh=iou_threshold,\n max_size_per_class=max_detections_per_class,\n max_total_size=max_total_detections)\n\n return non_max_suppressor_fn",
"def test_warning(self):\n self.p.compute_termination_criteria = True\n self.set_parameter_and_step(\"max_iter\", True, 5, \"ignore\")",
"def non_max_suppression(pred_bboxes, pred_labels, **kwargs):\n return tf.image.combined_non_max_suppression(\n pred_bboxes,\n pred_labels,\n **kwargs\n )",
"def set_parameters(self, **kwargs):\n kwargs.pop('population_size', None)\n super().set_parameters(population_size=1, **kwargs)\n self.candidates = None",
"def set_max_nb_instructions(nb): #py:set_max_nb_instructions\n RUR._set_max_nb_instructions_(nb)",
"def set_silent(self, **kw):\n self._set_attr(silent=True, **kw)",
"def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2,2)\n local_max = maximum_filter(image, footprint=neighborhood)==image\n local_max[image<(image.max()*0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num)+1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:,0], centers[:,1]] = True\n\n return ret",
"def limit_plasma(self, n_min=1e11, n_max=1e22, T_min=0.001, T_max=100.0):\n self.ne = np.clip(self.ne, n_min, n_max)\n self.ni = np.clip(self.ni, n_min, n_max)\n self.nn = np.clip(self.nn, n_min, n_max)\n self.Te = np.clip(self.Te, T_min, T_max)\n self.Ti = np.clip(self.Ti, T_min, T_max)",
"def set_params(self, maxn=None, minn=None):\n if maxn is not None:\n self._maxn = maxn\n if minn is not None:\n self._minn = minn",
"def set_default_parameters(self):\n super().set_default_parameters()\n if not \"n_sub_images\" in vars(self):\n self.n_sub_images = -1 # do all-sub-images",
"def suppress(self, t, w=None):\n return super(SmartCentroidPublisher, self).suppress(t, w)",
"def noisePreset() :\n s.noisePreset()",
"def setSeverityOverride(self, *args):\n return _libsbml.XMLErrorLog_setSeverityOverride(self, *args)",
"def _set_var_ignore(self):\n self._var_ignore = [k for k in self.__dict__.keys() if k[0] != '_']",
"def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2, 2)\n local_max = maximum_filter(image, footprint=neighborhood) == image\n local_max[image < (image.max() * 0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num) + 1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:, 0], centers[:, 1]] = True\n\n return ret",
"def non_maximum_suppression(image):\n # Find local maximas.\n neighborhood = generate_binary_structure(2, 2)\n local_max = maximum_filter(image, footprint=neighborhood) == image\n local_max[image < (image.max() * 0.1)] = False\n\n # Erode areas to single points.\n lbs, num = label(local_max)\n centers = center_of_mass(local_max, lbs, np.arange(num) + 1)\n centers = np.stack(centers).round().astype(np.int)\n ret = np.zeros_like(image, dtype=np.bool)\n ret[centers[:, 0], centers[:, 1]] = True\n\n return ret",
"def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))",
"def setSilent(self) -> None:\n ...",
"def noiseoff(subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, False, False)\n multiSubarray('rfPower', subarray, True)"
] | [
"0.5966529",
"0.55709445",
"0.55309284",
"0.5446378",
"0.5345884",
"0.5345884",
"0.5295788",
"0.52417874",
"0.5239064",
"0.52170867",
"0.52112365",
"0.520788",
"0.5186509",
"0.51748765",
"0.51727885",
"0.5166071",
"0.510891",
"0.50601727",
"0.5058307",
"0.50549585",
"0.50475556",
"0.5040859",
"0.5023763",
"0.5018043",
"0.49946138",
"0.4983832",
"0.4983832",
"0.4980431",
"0.49724668",
"0.4964485"
] | 0.59055877 | 1 |
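
The `set_nms` document above only records the thresholds and clears the cached op; a minimal usage sketch follows, assuming GluonCV is installed and that `gluoncv.model_zoo.get_model` plus the `yolo3_darknet53_voc` model name are available (neither appears in the row above, they are illustrative assumptions):

```python
# Minimal sketch: tune NMS on a pretrained YOLOv3 detector at inference time.
# `get_model` and the model name are assumptions for illustration only.
from gluoncv import model_zoo

net = model_zoo.get_model('yolo3_darknet53_voc', pretrained=True)
# Suppress boxes overlapping above 0.45 IoU, keep at most 400 candidates
# before NMS, and return no more than 100 detections per image afterwards.
net.set_nms(nms_thresh=0.45, nms_topk=400, post_nms=100)
```
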
Reset class categories and class predictors. | def reset_class(self, classes):
self._clear_cached_op()
self._classes = classes
if self._pos_iou_thresh >= 1:
self._target_generator = YOLOV3TargetMerger(len(classes), self._ignore_iou_thresh)
for outputs in self.yolo_outputs:
outputs.reset_class(classes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reset(self):\n self.pred_classes.clear()\n self.gold_classes.clear()\n self.pred_probas.clear()\n self.gold_probas.clear()\n self.loss = 0\n self.nb_batches = 0\n self.prec_rec_f1 = None\n self.acc = None\n self.mcc = None",
"def reset(self):\n logging.info(\"Resetting DINTModel.\")\n if self.classifier:\n self.server.remove_model(self.classifier)\n # for ds in self.server.datasets:\n # self.server.remove_dataset(ds)\n # TODO: remove datasets?\n self.classifier = None",
"def _reset():\n global g_list_of_classifier\n global g_state\n\n g_state = False\n g_list_of_classifier = disco_classifiers([])",
"def reset(self):\n self.epochs = 0\n self.num_classes = 2 # Minimum of 2 classes\n self._random_state = check_random_state(self.random_state)\n if self.base_estimators:\n self.experts = [\n self.WeightedExpert(\n cp.deepcopy(be), 1, self.labels)\n for be in self.base_estimators\n ]\n else:\n self.experts = [\n self._construct_new_expert()\n ]",
"def reset(self):\n # must NOT reset color map here, otherwise we loose provided configs by user,\n # which are more important in this case for result images vs whatever the model task specified\n self.class_names = None\n self._map = None",
"def _untrain(self):\n if self.__clf:\n self.__clf._untrain()",
"def reset(self):\n self.train_loss.reset_states()\n self.train_accuracy.reset_states()\n self.val_loss.reset_states()\n self.val_accuracy.reset_states()\n self.train_mIoU.reset_states()\n self.val_mIoU.reset_states()",
"def reset(self):\n self.edges = None\n self.chi = None\n self.k = None\n self.n_bins = None\n self.classes = None\n self.n_params = None",
"def reset(self):\n self.pred = None\n self.target = None",
"def reset(self):\n self.pred = None\n self.target = None",
"def _reset(self) -> None:\n self.images = []\n self.activations = []\n self.labels = []\n self.preds = []\n self.n_found = 0",
"def _untrain(self):\n if not self.trained:\n return\n for clf in self.clfs:\n clf.untrain()\n super(BoostedClassifier, self)._untrain()",
"def _reset(self):\n self._set(\"_n_init_features\", None)\n self._set(\"_n_output_features\", None)\n self._set(\"_n_intervals\", None)\n self._set(\"_mapper\", {})\n self._set(\"_cpp_preprocessor\", None)\n self._set(\"_fitted\", False)",
"def reset(self):\n self._coco_gt = COCO()\n # Create an empty detection array with 7 columns:\n # (image_id, xmin, ymin, width, height, score, class)\n self._detections = np.empty(shape=(0, 7))\n self._images = set()",
"def finalize_class_set(self) -> None:\n logger.info(\"We have {} distinct classes, let's cluster it!\", len(self.classes))\n\n logger.debug(\"Created a cluster instance {} and this will cluster {} samples\", self.cluster, self.classes)\n try:\n assigned_clusters = self.cluster.cluster(vectors=[self.convert_str_list_to_vector(c) for c in self.classes],\n assign_clusters=True, trace=not execute_on_ssh_compute)\n except Exception:\n logger.exception(\"Failed to cluster the actual class set ({} samples)\", len(self.classes))\n return\n\n self.classes_to_one_hot_encode_dict.clear()\n for i in range(len(self.classes)):\n self.classes_to_one_hot_encode_dict[self.classes[i]] = assigned_clusters[i]",
"def reset(self):\n self.epochs = 0\n # Shuffle the training data\n perm = np.arange(self.num_train)\n np.random.shuffle(perm)\n assert self.num_train == self.train_images.shape[\n 0], 'Error incorrect shuffling mask'\n self.train_images = self.train_images[perm]\n self.train_labels = self.train_labels[perm]\n self.curr_train_index = 0",
"def delete_classification_head(self) -> None:\n del self.model.classifier",
"def reset_train_results(self):\n self.train_loss_results = {}\n self.train_accuracy_results = {}\n self.train_pred_results = {}",
"def reset(self) -> None:\n self.precision.reset()\n self.recall.reset()",
"def reset(self):\n\n self.scaler = None\n self.isFitted = False\n self.__create_scaler()",
"def _untrain(self):\n # untrain the mapper\n if self.__mapper is not None:\n self.__mapper.untrain()\n # let base class untrain as well\n super(MappedClassifier, self)._untrain()",
"def _reset(self):\n self._model._reset()\n super(RDPAnalyzer, self)._reset()",
"def _reset(self):\n [delattr(self, attr) for attr in ('_XtX', '_XtY', 'coef_', 'intercept_', 'is_fitted_') if hasattr(self, attr)]",
"def test_reset_training_set():\n classifier = classifier_module.Classifier(None)\n classifier.reset_training_set(117, \"a\")\n assert classifier.training_set == []\n assert classifier.training_size == 0\n assert classifier.ultimate_training_size == 117",
"def reset_train(self):\n\n self.model.apply(self._reset_weights)\n self.epoch_loss.reset()\n self.epoch = 0\n del self.batch_process\n self.batch_process = None",
"def reset_variables(self) -> None:\n self.attributs = {}\n self.data = []",
"def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None",
"def reset(self):\n\n self.rotation = 0\n self.iteration = 0\n self.predictions = []\n self.prediction = 0\n self.current_position = 0\n self.rotation_list = [0]\n self.prediction = 0\n self.initial_adjust = False",
"def _reset(self):\n\n # Checking one attribute is enough, because they are all set together\n # in partial_fit\n if hasattr(self, 'scale_'):\n del self.scale_\n del self.n_samples_seen_\n del self.mean_\n del self.var_",
"def _post_transform(self):\n # Reclassify strategy post __init__, if needed.\n for (reclassifier, args, kwargs) in self._reclassifiers:\n self.classifier = reclassifier(self.classifier, *args, **kwargs)"
] | [
"0.72172534",
"0.6899433",
"0.6748963",
"0.6726632",
"0.66773576",
"0.6629043",
"0.6529936",
"0.6484326",
"0.6478405",
"0.6478405",
"0.6471619",
"0.6466328",
"0.6457058",
"0.62709737",
"0.6202787",
"0.61770207",
"0.6149169",
"0.612528",
"0.60959744",
"0.6091237",
"0.6087531",
"0.60872006",
"0.6081047",
"0.6066839",
"0.60223013",
"0.6012965",
"0.6002564",
"0.59998363",
"0.59868664",
"0.5971098"
] | 0.70744306 | 1 |
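
The `reset_class` document above swaps the category list and rebuilds the target merger and output heads, which is what enables transfer to a custom label set. A hedged sketch, again assuming GluonCV's `model_zoo.get_model` and the `yolo3_darknet53_voc` name (not part of the row itself):

```python
# Minimal sketch: repurpose a VOC-pretrained YOLOv3 for a two-class problem.
# The `get_model` call and model name are illustrative assumptions.
from gluoncv import model_zoo

net = model_zoo.get_model('yolo3_darknet53_voc', pretrained=True)
# Replaces the 20 VOC categories; each YOLO output head rebuilds its class
# predictor, mirroring the `reset_class` implementation shown above.
net.reset_class(['person', 'dog'])
```
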
YOLO3 multiscale with darknet53 base network on VOC dataset. | def yolo3_darknet53_voc(pretrained_base=True, pretrained=False, num_sync_bn_devices=-1, **kwargs):
from ...data import VOCDetection
pretrained_base = False if pretrained else pretrained_base
base_net = darknet53(
pretrained=pretrained_base, num_sync_bn_devices=num_sync_bn_devices, **kwargs)
stages = [base_net.features[:15], base_net.features[15:24], base_net.features[24:]]
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
strides = [8, 16, 32]
classes = VOCDetection.CLASSES
return get_yolov3(
'darknet53', stages, [512, 256, 128], anchors, strides, classes, 'voc',
pretrained=pretrained, num_sync_bn_devices=num_sync_bn_devices, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self):\n # TODO\n self.confThreshold = 0.6\n self.nmsThreshold = 0.5\n self.inpWidth = 320\n self.inpHeight = 320\n classesFile = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/coco.names\"\n self.classes = None\n with open(classesFile,'rt') as f:\n self.classes = f.read().rstrip('\\n').split('\\n')\n\n modelConfiguration = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.cfg\"\n modelWeights = \"/content/drive/My Drive/tracking_course/Detection/yolo_workshop/yolov3.weights\"\n self.net = cv2.dnn.readNetFromDarknet(modelConfiguration, modelWeights)\n self.net.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)\n self.net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)",
"def get_pytorch_yolo(get_default_cifar10_subset):\n import cv2\n import torch\n\n from pytorchyolo import models\n from pytorchyolo.utils.loss import compute_loss\n\n from art.estimators.object_detection.pytorch_yolo import PyTorchYolo\n\n model_path = \"/tmp/PyTorch-YOLOv3/config/yolov3.cfg\"\n weights_path = \"/tmp/PyTorch-YOLOv3/weights/yolov3.weights\"\n model = models.load_model(model_path=model_path, weights_path=weights_path)\n\n class YoloV3(torch.nn.Module):\n def __init__(self, model):\n super().__init__()\n self.model = model\n\n def forward(self, x, targets=None):\n if self.training:\n outputs = self.model(x)\n # loss is averaged over a batch. Thus, for patch generation use batch_size = 1\n loss, loss_components = compute_loss(outputs, targets, self.model)\n\n loss_components_dict = {\"loss_total\": loss}\n\n return loss_components_dict\n else:\n return self.model(x)\n\n model = YoloV3(model)\n\n object_detector = PyTorchYolo(\n model=model, input_shape=(3, 416, 416), clip_values=(0, 1), attack_losses=(\"loss_total\",)\n )\n\n n_test = 10\n (_, _), (x_test_cifar10, y_test_cifar10) = get_default_cifar10_subset\n x_test_cifar10 = x_test_cifar10[0:n_test]\n\n x_test = cv2.resize(\n x_test_cifar10[0].transpose((1, 2, 0)), dsize=(416, 416), interpolation=cv2.INTER_CUBIC\n ).transpose((2, 0, 1))\n x_test = np.expand_dims(x_test, axis=0)\n x_test = np.repeat(x_test, repeats=2, axis=0)\n\n # Create labels\n\n result = object_detector.predict(x=x_test)\n\n y_test = [\n {\n \"boxes\": result[0][\"boxes\"],\n \"labels\": result[0][\"labels\"],\n \"scores\": np.ones_like(result[0][\"labels\"]),\n },\n {\n \"boxes\": result[1][\"boxes\"],\n \"labels\": result[1][\"labels\"],\n \"scores\": np.ones_like(result[1][\"labels\"]),\n },\n ]\n\n yield object_detector, x_test, y_test",
"def model_fn(model_dir):\n ctx = mx.cpu()\n net = gcv.model_zoo.get_model(\n 'yolo3_darknet53_voc',\n pretrained=False,\n ctx=ctx)\n batchify = gcv.data.batchify._stack_arrs\n net.load_parameters(os.path.join(model_dir, 'yolo3_darknet53_voc.params'), mx.cpu(0))\n net.hybridize()\n def image_transform(im_bytes):\n \"\"\"\n Apply image transformation to raw byte images\n \"\"\"\n img = [mx.image.imdecode(bytes.fromhex(im.lstrip('0x'))) for im in im_bytes]\n out = gcv.data.transforms.presets.yolo.transform_test(img)\n return out[0]\n\n return net, image_transform, batchify",
"def test():\n args = parse_args()\n\n devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=True, device_id=devid)\n\n # logger\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n rank_id = int(os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0\n args.logger = get_logger(args.outputs_dir, rank_id)\n\n context.reset_auto_parallel_context()\n parallel_mode = ParallelMode.STAND_ALONE\n context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)\n\n args.logger.info('Creating Network....')\n network = SolveOutput(YOLOV3DarkNet53(is_training=False))\n\n data_root = args.data_root\n ann_file = args.annFile\n\n args.logger.info(args.pretrained)\n if os.path.isfile(args.pretrained):\n param_dict = load_checkpoint(args.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('yolo_network.'):\n param_dict_new[key[13:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n args.logger.info('load_model {} success'.format(args.pretrained))\n else:\n args.logger.info('{} not exists or not a pre-trained file'.format(args.pretrained))\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(args.pretrained))\n exit(1)\n\n config = ConfigYOLOV3DarkNet53()\n if args.testing_shape:\n config.test_img_shape = conver_testing_shape(args)\n\n ds, data_size = create_yolo_dataset(data_root, ann_file, is_training=False, batch_size=1,\n max_epoch=1, device_num=1, rank=rank_id, shuffle=False,\n config=config)\n\n args.logger.info('testing shape : {}'.format(config.test_img_shape))\n args.logger.info('totol {} images to eval'.format(data_size))\n\n network.set_train(False)\n # build attacker\n attack = DeepFool(network, num_classes=80, model_type='detection', reserve_ratio=0.9, bounds=(0, 1))\n input_shape = Tensor(tuple(config.test_img_shape), ms.float32)\n\n args.logger.info('Start inference....')\n batch_num = args.samples_num\n adv_example = []\n for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):\n if i >= batch_num:\n break\n image = data[\"image\"]\n image_shape = data[\"image_shape\"]\n\n gt_boxes, gt_logits = network(image, input_shape)\n gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()\n gt_labels = np.argmax(gt_logits, axis=2)\n\n adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()), (gt_boxes, gt_labels))\n adv_example.append(adv_img)\n np.save('adv_example.npy', adv_example)",
"def yolo_object_detection(image_filename, net, confidence, threshold, labels, colors):\n # read image file\n # image is an array of image data (row, column, channel)\n image = cv2.imread(image_filename)\n (H, W) = image.shape[:2]\n\n # preprocess image data with rescaling and resizing to fit YOLO input shape\n # OpenCV assumes BGR images: we have to convert to RGB, with swapRB=True\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416), swapRB=True, crop=False)\n\n # set a new input to the network\n net.setInput(blob)\n\n # get YOLOv3's output layer names\n ln = net.getLayerNames()\n ln_out = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # perform object detection\n layerOutputs = net.forward(ln_out)\n\n\n # Get the result from outputs, and filter them by confidence\n boxes = []\n scores = []\n classes = []\n for output in layerOutputs: # There are three output layers in YOLO v3\n # Filter outputs by confidence\n (xywh_filterd, score_filtered, class_filtered) = filter_outputs(output, confidence)\n\n boxes.append(xywh_filterd)\n scores.append(score_filtered)\n classes.append(class_filtered)\n\n # Change shapes of arrays so that all boxes from any output layers are stored together\n boxes = np.vstack([r for r in boxes])\n scores = np.concatenate([r for r in scores], axis=None)\n classes = np.concatenate([r for r in classes], axis=None)\n\n # Apply Non-max supression\n boxes_coord = rescale_box_coord(boxes, W, H)\n nms_idx = yolo_non_max_supression(boxes_coord, scores, confidence, threshold)\n \n # filter the good ones\n return image, [{'box':boxes[_], 'score':scores[_], 'class':classes[_]} for _ in nms_idx]",
"def VLocNet_v3(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n # odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n # odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_fc_3 = Dense(4, name='odo_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_2_1, activation='elu', branch='_geo')\n\n # The Previous Pose back-feeding\n\n input_previous_pose = Input(shape=(7, ), name='input_previous_pose')\n\n previous_fc_4 = Dense(802816, name='previous_fc_4')(input_previous_pose)\n\n res_previous = Reshape((28, 28, 1024), name='res_previous')(previous_fc_4)\n\n # Concatenation the previous pose back to the residual unit\n con_4 = concatenate([pose_4, res_previous], name='previous_and_geo4_merge')\n\n pose_5 = 
ResNet_50_unit_5(input_tensor=con_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_fc_3 = Dense(4, name='pose_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1, input_previous_pose], output=[odo_merge, pose_merge],\n name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model",
"def yolo_detection(raw_image):\n class_ids = []\n confidences = []\n boxes = []\n height , width ,c= raw_image.shape\n blob = cv2.dnn.blobFromImage(raw_image, 0.00392, (416,416), (0,0,0), True, crop=False)\n net.setInput(blob)\n outs = net.forward(output_layers)\n\n for out in outs:\n for detection in out:\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n if confidence > 0.4:\n center_x = int(detection[0]*width)\n center_y = int(detection[1]*height)\n w = int(detection[2]*width)\n h = int(detection[3]*height)\n ##Rectangle Draw\n topleft_x = int(center_x-(w/2))\n topleft_y = int(center_y-(h/2))\n\n boxes.append([topleft_x,topleft_y,w,h])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)\n #DISPLAY DETECTION\n total_detections = len(boxes)\n for i in range(total_detections):\n if i in indexes:\n topleft_x, topleft_y, w,h = boxes[i]\n label = detection_classes[class_ids[i]]\n cv2.rectangle(raw_image, (topleft_x,topleft_y), (topleft_x+w,topleft_y+h), (0,100,255), 1)\n cv2.putText(raw_image, label, (topleft_x, topleft_y),cv2.FONT_HERSHEY_COMPLEX,1,(0,165,255))\n\n\n return raw_image",
"def yolo_v3(inputs, num_classes, is_training=False, data_format='NCHW', reuse=False, with_spp=False):\n # it will be needed later on\n img_size = inputs.get_shape().as_list()[1:3]\n\n # transpose the inputs to NCHW\n if data_format == 'NCHW':\n inputs = tf.transpose(inputs, [0, 3, 1, 2])\n\n # normalize values to range [0..1]\n inputs = inputs / 255\n\n # set batch norm params\n batch_norm_params = {\n 'decay': _BATCH_NORM_DECAY,\n 'epsilon': _BATCH_NORM_EPSILON,\n 'scale': True,\n 'is_training': is_training,\n 'fused': None, # Use fused batch norm if possible.\n }\n\n # Set activation_fn and parameters for conv2d, batch_norm.\n with slim.arg_scope([slim.conv2d, slim.batch_norm, _fixed_padding], data_format=data_format, reuse=reuse):\n with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params,\n biases_initializer=None,\n activation_fn=lambda x: tf.nn.leaky_relu(x, alpha=_LEAKY_RELU)):\n with tf.variable_scope('darknet-53'):\n route_1, route_2, inputs = darknet53(inputs)\n\n with tf.variable_scope('yolo-v3'):\n route, inputs = _yolo_block(inputs, 512, data_format, with_spp)\n\n detect_1 = _detection_layer(\n inputs, num_classes, _ANCHORS[6:9], img_size, data_format)\n detect_1 = tf.identity(detect_1, name='detect_1')\n\n inputs = _conv2d_fixed_padding(route, 256, 1)\n upsample_size = route_2.get_shape().as_list()\n inputs = _upsample(inputs, upsample_size, data_format)\n inputs = tf.concat([inputs, route_2],\n axis=1 if data_format == 'NCHW' else 3)\n\n route, inputs = _yolo_block(inputs, 256)\n\n detect_2 = _detection_layer(\n inputs, num_classes, _ANCHORS[3:6], img_size, data_format)\n detect_2 = tf.identity(detect_2, name='detect_2')\n\n inputs = _conv2d_fixed_padding(route, 128, 1)\n upsample_size = route_1.get_shape().as_list()\n inputs = _upsample(inputs, upsample_size, data_format)\n inputs = tf.concat([inputs, route_1],\n axis=1 if data_format == 'NCHW' else 3)\n\n _, inputs = _yolo_block(inputs, 128)\n\n detect_3 = _detection_layer(\n inputs, num_classes, _ANCHORS[0:3], img_size, data_format)\n detect_3 = tf.identity(detect_3, name='detect_3')\n\n detections = tf.concat([detect_1, detect_2, detect_3], axis=1)\n detections = tf.identity(detections, name='detections')\n return detections",
"def yolo3_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):\r\n mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)\r\n print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)\r\n # expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)\r\n\r\n # activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)\r\n # expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)\r\n\r\n # activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)\r\n # expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)\r\n\r\n # NOTE: activation layer name may different for TF1.x/2.x, so we\r\n # use index to fetch layer\r\n # f1: 13 x 13 x (960*alpha)\r\n f1 = mobilenetv3large.layers[194].output\r\n # f2: 26 x 26 x (672*alpha)\r\n f2 = mobilenetv3large.layers[146].output\r\n # f3: 52 x 52 x (240*alpha)\r\n f3 = mobilenetv3large.layers[79].output\r\n\r\n f1_channel_num = int(960*alpha)\r\n f2_channel_num = int(672*alpha)\r\n f3_channel_num = int(240*alpha)\r\n #f1_channel_num = 1024\r\n #f2_channel_num = 512\r\n #f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo3_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs = inputs, outputs=[y1,y2,y3])",
"def darknet53():\r\n\r\n darknet = DarkNet(\r\n block=ResidualBlock,\r\n layer_nums=[1, 2, 8, 8, 4],\r\n in_channels=[32, 64, 128, 256, 512],\r\n out_channels=[64, 128, 256, 512, 1024],\r\n )\r\n\r\n return darknet",
"def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data",
"def VLocNet_v2(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_3_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_fc_3 = Dense(4, name='odo_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_geo')\n\n # The Previous Pose back-feeding\n\n input_previous_pose = Input(shape=(7, ), name='input_previous_pose')\n\n previous_fc_4 = Dense(200704, name='previous_fc_4')(input_previous_pose)\n\n res_previous = Reshape((14, 14, 1024), name='res_previous')(previous_fc_4)\n\n # Concatenation the previous pose back to the residual unit\n con_4 = concatenate([pose_4, res_previous], name='previous_and_geo4_merge')\n\n pose_5 = 
ResNet_50_unit_5(input_tensor=con_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_fc_3 = Dense(4, name='pose_fc_3', kernel_initializer=initializers.RandomNormal(mean=0, stddev=0.05),\n bias_initializer=initializers.RandomNormal(mean=0, stddev=0.05))(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1, input_previous_pose], output=[odo_merge, pose_merge],\n name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model",
"def main(\n image = None ,\n gpu = -1,\n weights_path= f\"{ Path(__file__).parent }/weights/yolov3.weights\",\n background = False\n):\n print( weights_path )\n my_path = Path( __file__ ).parent\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--gpu', type=int, default= gpu )\n parser.add_argument('--cfg', type=str, default=my_path/'config/yolov3_default.cfg')\n parser.add_argument('--ckpt', type=str,\n help='path to the checkpoint file')\n parser.add_argument('--weights_path', type=str,\n default= weights_path, help='path to weights file')\n parser.add_argument('--image', type=str , default= image )\n parser.add_argument('--background', type=bool,\n default= background , help='background(no-display mode. save \"./output.png\")')\n parser.add_argument('--detect_thresh', type=float,\n default= 0.5 , help='confidence threshold')\n args = parser.parse_args()\n\n with open(args.cfg, 'r') as f:\n cfg = yaml.load(f)\n\n imgsize = cfg['TEST']['IMGSIZE']\n model = YOLOv3(cfg['MODEL'])\n\n confthre = cfg['TEST']['CONFTHRE'] \n nmsthre = cfg['TEST']['NMSTHRE']\n\n if args.detect_thresh:\n confthre = args.detect_thresh\n\n\n\n img = imread( args.image )\n if img is None :\n print( \"load image failed\" )\n print( args.image )\n return\n\n img_raw = img.copy()[:, :, ::-1].transpose((2, 0, 1))\n img, info_img = preprocess(img, imgsize, jitter=0) # info = (h, w, nh, nw, dx, dy)\n img = np.transpose(img / 255., (2, 0, 1))\n img = torch.from_numpy(img).float().unsqueeze(0)\n\n if args.gpu >= 0:\n model.cuda(args.gpu)\n img = Variable(img.type(torch.cuda.FloatTensor))\n else:\n img = Variable(img.type(torch.FloatTensor))\n\n assert args.weights_path or args.ckpt, 'One of --weights_path and --ckpt must be specified'\n\n if args.weights_path:\n print(\"loading yolo weights %s\" % (args.weights_path))\n parse_yolo_weights(model, args.weights_path)\n elif args.ckpt:\n print(\"loading checkpoint %s\" % (args.ckpt))\n state = torch.load(args.ckpt)\n if 'model_state_dict' in state.keys():\n model.load_state_dict(state['model_state_dict'])\n else:\n model.load_state_dict(state)\n\n model.eval()\n\n\n with torch.no_grad():\n outputs1 = model(img)\n # np.save(\"output.npy\" , outputs.numpy() )\n # torch.save( outputs1 , \"outputs1.pt\" )\n out1 = torch.load( \"outputs1.pt\" )\n rere = torch.equal( outputs1 , out1 )\n outputs = postprocess(outputs1, 80, confthre, nmsthre)\n\n a = \"hoho\"\n\n\n if outputs[0] is None:\n print(\"No Objects Deteted!!\")\n return\n\n coco_class_names, coco_class_ids, coco_class_colors = get_coco_label_names()\n\n bboxes = list()\n classes = list()\n colors = list()\n\n for x1, y1, x2, y2, conf, cls_conf, cls_pred in outputs[0]:\n\n cls_id = coco_class_ids[int(cls_pred)]\n print(int(x1), int(y1), int(x2), int(y2), float(conf), int(cls_pred))\n print('\\t+ Label: %s, Conf: %.5f' %\n (coco_class_names[cls_id], cls_conf.item()))\n box = yolobox2label([y1, x1, y2, x2], info_img)\n bboxes.append(box)\n classes.append(cls_id)\n colors.append(coco_class_colors[int(cls_pred)])\n\n # args.background = True\n\n if args.background:\n import matplotlib\n matplotlib.use('Agg')\n\n from utils.vis_bbox import vis_bbox\n\n vis_bbox(\n img_raw, bboxes, label=classes, label_names=coco_class_names,\n instance_colors=colors, linewidth=2)\n\n\n if args.background:\n output = Path( \"./output\" )\n output.mkdir( parents=True , exist_ok=True )\n now = datetime.now().strftime(\"%Y-%m-%d %H-%M-%S\")\n output /= f\"output-{now}.png\"\n plt.savefig( output )\n\n return str( output.absolute() )\n # 
return plt_to_qpixmap(plt.gca())\n else :\n plt.show()",
"def build_model_mobilenet(num_classes):",
"def yolo4_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):\r\n mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)\r\n print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))\r\n\r\n # input: 416 x 416 x 3\r\n # activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)\r\n # expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)\r\n\r\n # activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)\r\n # expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)\r\n\r\n # activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)\r\n # expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)\r\n\r\n # NOTE: activation layer name may different for TF1.x/2.x, so we\r\n # use index to fetch layer\r\n # f1: 13 x 13 x (960*alpha)\r\n f1 = mobilenetv3large.layers[194].output\r\n # f2: 26 x 26 x (672*alpha) for 416 input\r\n f2 = mobilenetv3large.layers[146].output\r\n # f3: 52 x 52 x (240*alpha) for 416 input\r\n f3 = mobilenetv3large.layers[79].output\r\n\r\n f1_channel_num = int(960*alpha)\r\n f2_channel_num = int(672*alpha)\r\n f3_channel_num = int(240*alpha)\r\n #f1_channel_num = 1024\r\n #f2_channel_num = 512\r\n #f3_channel_num = 256\r\n\r\n y1, y2, y3 = yolo4_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)\r\n\r\n return Model(inputs, [y1, y2, y3])",
"def do_stuff(self, net, meta):\n cv2_img = self.img_to_cv2(self.last_img)\n # Now we can use cv2 functions as the image is <type 'numpy.ndarray'>\n # rospy.loginfo(\"cv2_img: \" + str(type(cv2_img)))\n # Your OpenCV stuff\n # cv2_img = cv2.resize(cv2_img, (0,0), fx=0.25, fy=0.25) \n\n (rows,cols,channels) = cv2_img.shape\n # if cols > 60 and rows > 60 :\n # cv2.circle(cv2_img, (50,50), 10, 255)\n \n global x_old\n global no_meas_counter\n global est\n global cor\n global w\n global h\n \n\n r = darknet.detect(net, meta, cv2_img)\n # print(r)\n\n if not r:\n no_meas_counter += 1\n\n for i in r:\n if i[0].decode() == \"person\":\n x, y, w, h = i[2][0], i[2][1], i[2][2], i[2][3]\n xmin, ymin, xmax, ymax = darknet.convertBack(float(x), float(y), float(w), float(h))\n pt1 = (xmin, ymin)\n pt2 = (xmax, ymax)\n cv2.rectangle(cv2_img, pt1, pt2, (0, 255, 0), 2)\n cv2.putText(cv2_img, i[0].decode() + \" [\" + str(round(i[1] * 100, 2)) + \"]\", (pt1[0], pt1[1] + 20), cv2.FONT_HERSHEY_SIMPLEX, 1, [0, 255, 0], 4)\n \n global mp\n mp = np.array([[np.float32(x)],[np.float32(y)]])\n cor = kalman.correct(mp)\n no_meas_counter = 0\n\t\t\n\n else:\n no_meas_counter += 1\n \n # x_old = x\n\n # cv2.imshow(\"cv2_img\", cv2_img)\n # k = cv2.waitKey(1)\n # if k == 27:\n # cv2.destroyAllWindows()\n # exit()\n\n if no_meas_counter < 30:\n est = kalman.predict()\n msg = PolygonStamped()\n msg.header.stamp = rospy.Time.now()\n # msg.polygon.points = [Point32(x=x, y=y), Point32(x=cols, y=rows), Point32(x=w, y=h)]\n msg.polygon.points = [Point32(x=est[0], y=est[1]), Point32(x=cols, y=rows), Point32(x=w, y=h)] \n self.pub_yolo_detection.publish(msg)\n\n # cv2.imshow(\"Image window\", cv2_img)\n # cv2.waitKey(3)\n\n self.pub_images(cv2_img)\n self.is_new_img = False",
"def multiclass():\n # load\n print(\"Loading data...\")\n train_y_cpu, train_x_cpu = dataio.bin_to_tensors(constants.TRAIN_ONEHOT, 10)\n val_y_cpu, val_x_cpu = dataio.bin_to_tensors(constants.VAL_ONEHOT, 10)\n\n print(\"Moving data to GPU...\")\n train_y = train_y_cpu.type(IntTT)\n train_x = train_x_cpu.type(FloatTT)\n val_y = val_y_cpu.type(IntTT)\n val_x = val_x_cpu.type(FloatTT)\n\n print(\"Starting experiments...\")\n dummy = 0.0\n\n # OLS analytic solution. uses CPU tensors to go to/from numpy for\n # pseudoinverse.\n w = ols_analytic(train_x_cpu, train_y_cpu)\n report(\n \"[multiclass] OLS analytic (train)\",\n w,\n train_x,\n train_y,\n dummy,\n multiclass_eval,\n ols_loss,\n )\n report(\n \"[multiclass] OLS analytic (val)\",\n w,\n val_x,\n val_y,\n dummy,\n multiclass_eval,\n ols_loss,\n )\n\n # # OLS gradient descent\n # ols_gd_settings: GDSettings = {'lr': 0.02, 'epochs': 3500, 'report_interval': 500}\n # w = gradient_descent(train_x, train_y, -1, ols_loss, ols_gradient, ols_gd_settings)\n # report('[multiclass] OLS GD (train)', w, train_x, train_y, dummy, multiclass_eval, ols_loss)\n # report('[multiclass] OLS GD (val)', w, val_x, val_y, dummy, multiclass_eval, ols_loss)\n\n # # OLS coordinate descent\n # w = coordinate_descent(train_x, train_y, dummy, ols_cd_weight_update, ols_loss, {'epochs': 150, 'report_interval': 10})\n # report('[multiclass] OLS CD (train)', w, train_x, train_y, dummy, multiclass_eval, ols_loss)\n # report('[multiclass] OLS CD (val)', w, val_x, val_y, dummy, multiclass_eval, ols_loss)\n\n # ridge analytic solution\n for lmb in [0.2]:\n w = ridge_analytic(train_x, train_y, lmb)\n report(\n \"[multiclass] Ridge analytic (train) lambda={}\".format(lmb),\n w,\n train_x,\n train_y,\n lmb,\n multiclass_eval,\n ridge_loss,\n )\n report(\n \"[multiclass] Ridge analytic (val) lambda={}\".format(lmb),\n w,\n val_x,\n val_y,\n lmb,\n multiclass_eval,\n ridge_loss,\n )\n\n # ridge gradient descent\n # ridge_gd_settings: GDSettings = {'lr': 0.02, 'epochs': 3500, 'report_interval': 500}\n # for lmb in [0.2]:\n # w = gradient_descent(train_x, train_y, lmb, ridge_loss, ridge_gradient, ridge_gd_settings)\n # report('[multiclass] Ridge GD (train)', w, train_x, train_y, lmb, multiclass_eval, ridge_loss)\n # report('[multiclass] Ridge GD (val)', w, val_x, val_y, lmb, multiclass_eval, ridge_loss)\n\n # # ridge coordinate descent\n # ridge_cd_settings: CDSettings = {'epochs': 150, 'report_interval': 10}\n # for lmb in [0.2]:\n # w = coordinate_descent(train_x, train_y, lmb, ridge_cd_weight_update, ridge_loss, ridge_cd_settings)\n # report('[multiclass] Ridge CD (train)', w, train_x, train_y, lmb, multiclass_eval, ridge_loss)\n # report('[multiclass] Ridge CD (val)', w, val_x, val_y, lmb, multiclass_eval, ridge_loss)\n\n # # lasso GD\n # lasso_gd_settings: GDSettings = {'lr': 0.02, 'epochs': 3000, 'report_interval': 500}\n # for lmb in [0.2]:\n # w = gradient_descent(train_x, train_y, lmb, lasso_loss, lasso_gradient, lasso_gd_settings)\n # report('[multiclass] Lasso GD (train) lambda={}'.format(lmb), w, train_x, train_y, lmb, multiclass_eval, lasso_loss)\n # report('[multiclass] Lasso GD (val) lambda={}'.format(lmb), w, val_x, val_y, lmb, multiclass_eval, lasso_loss)\n\n # lasso CD\n lasso_cd_settings: CDSettings = {\"epochs\": 150, \"report_interval\": 10}\n for lmb in [0.01]:\n w, record = coordinate_descent(\n train_x, train_y, lmb, lasso_cd_weight_update, lasso_loss, lasso_cd_settings\n )\n report(\n \"[multiclass] Lasso CD (train) lambda={}\".format(lmb),\n w,\n 
train_x,\n train_y,\n lmb,\n multiclass_eval,\n lasso_loss,\n )\n report(\n \"[multiclass] Lasso CD (val) lambda={}\".format(lmb),\n w,\n val_x,\n val_y,\n lmb,\n multiclass_eval,\n lasso_loss,\n )",
"def test_confidence_thresholding_2thresholds_3d_vis_api(csv_filename):\n input_features = [\n text_feature(encoder={\"vocab_size\": 10, \"min_len\": 1, \"type\": \"stacked_cnn\"}),\n number_feature(),\n category_feature(encoder={\"vocab_size\": 10, \"embedding_size\": 5}),\n set_feature(),\n sequence_feature(encoder={\"vocab_size\": 10, \"max_len\": 10, \"type\": \"embed\"}),\n ]\n output_features = [\n category_feature(decoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n category_feature(decoder={\"vocab_size\": 2}, reduce_input=\"sum\"),\n ]\n encoder = \"parallel_cnn\"\n with TemporaryDirectory() as tmpvizdir:\n # Generate test data\n data_csv = generate_data(input_features, output_features, os.path.join(tmpvizdir, csv_filename))\n input_features[0][ENCODER][TYPE] = encoder\n model = run_api_experiment(input_features, output_features)\n test_df, train_df, val_df = obtain_df_splits(data_csv)\n _, _, output_dir = model.train(\n training_set=train_df, validation_set=val_df, output_directory=os.path.join(tmpvizdir, \"results\")\n )\n test_stats, predictions, _ = model.evaluate(\n dataset=test_df, collect_predictions=True, output_directory=output_dir\n )\n\n output_feature_name1 = output_features[0][\"name\"]\n output_feature_name2 = output_features[1][\"name\"]\n\n ground_truth_metadata = model.training_set_metadata\n feature1_cols = [\n f\"{output_feature_name1}_probabilities_{label}\"\n for label in ground_truth_metadata[output_feature_name1][\"idx2str\"]\n ]\n feature2_cols = [\n f\"{output_feature_name2}_probabilities_{label}\"\n for label in ground_truth_metadata[output_feature_name2][\"idx2str\"]\n ]\n\n # probabilities need to be list of lists containing each row data from the\n # probability columns ref: https://ludwig-ai.github.io/ludwig-docs/latest/user_guide/api/LudwigModel#evaluate\n probability1 = predictions.loc[:, feature1_cols].values\n probability2 = predictions.loc[:, feature2_cols].values\n\n target_predictions1 = test_df[output_feature_name1]\n target_predictions2 = test_df[output_feature_name2]\n ground_truth1 = np.asarray(\n [ground_truth_metadata[output_feature_name1][\"str2idx\"][prediction] for prediction in target_predictions1]\n )\n ground_truth2 = np.asarray(\n [ground_truth_metadata[output_feature_name2][\"str2idx\"][prediction] for prediction in target_predictions2]\n )\n viz_outputs = (\"pdf\", \"png\")\n for viz_output in viz_outputs:\n vis_output_pattern_pdf = os.path.join(output_dir, f\"*.{viz_output}\")\n visualize.confidence_thresholding_2thresholds_3d(\n [probability1, probability2],\n [ground_truth1, ground_truth2],\n model.training_set_metadata,\n [output_feature_name1, output_feature_name2],\n labels_limit=0,\n output_directory=output_dir,\n file_format=viz_output,\n )\n figure_cnt = glob.glob(vis_output_pattern_pdf)\n assert 1 == len(figure_cnt)",
"def run_yolo(net, image, coco_classes, save_image=False):\n\n global frame, classes\n # Give the configuration and weight files for the model and load the network using them.\n classes = coco_classes\n\n frame = cv2.imread(str(image))\n\n # Crop the frame\n # (y_min, y_max) (x_min, x_max)\n # frame = frame[300:1080, 200:1920] # Classifying people\n # frame = frame[0:500, 0:1920] # Classifying Cars\n\n # Stop the program if reached end of video\n if frame is None:\n return\n\n # Create a 4D blob from a frame.\n blob = cv2.dnn.blobFromImage(\n frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False\n )\n\n # Sets the input to the network\n net.setInput(blob)\n\n # Runs the forward pass to get output of the output layers\n outs = net.forward(getOutputsNames(net))\n\n # Remove the bounding boxes with low confidence\n postprocess(frame, outs, save_image)\n\n # Get the overall time for inference(t) and the timings for each of the layers(in layersTimes)\n t, _ = net.getPerfProfile()\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv2.getTickFrequency())\n # cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n print(label)\n\n # Save image with all bounding boxes\n # utils.write_image(frame)",
"def run(self):\n r = rospy.Rate(30)\n\n net = darknet.load_net(b\"/home/nvidia/darknet/cfg/yolov3-tiny.cfg\", b\"/home/nvidia/darknet/yolov3-tiny.weights\", 0)\n meta = darknet.load_meta(b\"/home/nvidia/darknet/cfg/coco.data\")\n\n # cv2.namedWindow(\"cv2_img\", cv2.WINDOW_NORMAL)\n\n while not rospy.is_shutdown():\n if self.last_img is not None:\n self.do_stuff(net,meta)\n r.sleep()",
"def detect_video(yolo_v3_model, video_path, batch_frames, output_path, train_input_size, classes_file_path, \n score_threshold, iou_threshold, num_of_anchor_bbox, strides, anchors, show = False, \n rectangle_colors = ''):\n \n # obtain number of classes\n num_of_classes = len(read_class_names(classes_file_path))\n \n # obtain VideoCapture object \n vid = cv2.VideoCapture(video_path)\n \n # obtain width, height and fps of video\n # by default VideoCapture returns float instead of int\n width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))\n fps = int(vid.get(cv2.CAP_PROP_FPS))\n\n # obtain video codec\n codec = cv2.VideoWriter_fourcc(*'XVID')\n \n # obtain output_path\n # output_path must be .mp4\n out = cv2.VideoWriter(output_path, codec, fps+1, (width, height)) \n\n # create list to store images\n images = []\n \n # variable to track frame\n frame = 0 \n \n while True:\n \n try:\n \n # grabs, decodes and returns the next video frame\n _, image = vid.read()\n \n # append original image to original_images list\n images.append(image[:])\n \n # increment frame\n frame += 1\n \n \n # if current frame is less than batch_frames\n if frame < batch_frames:\n \n # move to next frame \n continue\n \n # iterate over images in chronological order (last image is image of interest to put bbox)\n for x in range(batch_frames):\n \n # convert original image to grayscale \n image = cv2.cvtColor(images[-batch_frames + x + 1], cv2.COLOR_BGR2RGB)\n \n # preprocess image\n image = transform_images(image[:], train_input_size)\n \n # obtain concat frame if none exist\n if x == 0: \n \n concat_image = image[:]\n \n # concatenate subsequent frames to concat_image\n else:\n \n concat_image = np.concatenate((concat_image, image), axis = -1)\n \n except:\n \n break\n \n # add batch dimensions to concatenated image \n concat_image = concat_image[np.newaxis, ...].astype(np.float32)\n \n # create constant tensor from concatenated image and feed it to yolo_v3_model\n batched_input = tf.constant(concat_image)\n yolo_output = yolo_v3_model(batched_input)\n\n # list to store bboxes from respective scales\n pred_bbox = []\n\n # iterate over 3 scales\n for i in range(3):\n\n # decode resepctive yolo_output from each scale\n pred_result = decode(yolo_output = yolo_output[i], num_of_anchor_bbox = num_of_anchor_bbox, \n classes = num_of_classes, strides = strides, anchors = anchors, index = i)\n\n # append to pred_bbox\n pred_bbox.append(pred_result)\n \n # obtain results of shape (:, 5 + num_classes), i.e all bboxes\n pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]\n \n # concatenate all bboxes from all scales\n pred_bbox = tf.concat(pred_bbox, axis = 0)\n\n # post process all bboxes using latest image in orignal_images\n bboxes = postprocess_boxes(pred_bbox, images[-1], train_input_size, score_threshold)\n\n # non maximal supression for bboxes\n bboxes = nms(bboxes, iou_threshold, method = 'nms')\n\n # draw bbox on latest image in orignal_images\n image = draw_bbox(images[-1], bboxes, classes_file_path, rectangle_colors = rectangle_colors)\n \n # save image frame to video path if path to save is given\n if output_path != '': out.write(image)\n \n # display image frame (i.e play video) if show is true \n if show:\n \n # show the image\n cv2.imshow('output', image)\n \n # if q key is presssed\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n \n # end session\n cv2.destroyAllWindows()\n \n # break out of while loop\n break\n \n # When everything done, release the 
capture\n vid.release()\n cv2.destroyAllWindows()",
"def yolo_body(inputs, num_anchors, num_classes, architecture=\"yolov4\", base_ops=DarknetConv2D_BN_Leaky):\n if architecture == \"yolov4\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n outputs = spatial_pyramid_block(cspdarknet_body(inputs), base_ops=base_ops) if config.spp else cspdarknet_body(\n inputs)\n body = Model(inputs, outputs)\n features = [body.layers[131].output, body.layers[204].output, body.output] # mish_37 58\n elif architecture == \"yolov4_efficientnetb0\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB0(include_top=False, weights=None, input_tensor=inputs)\n\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetb1\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4\":\n config = get_yolo_config(\"yolov4_efficientnetb0\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB2(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetliteb1\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetliteb2\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB2(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetliteb3\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB3(include_top=False, 
weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_efficientnetliteb4\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB4(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov4_mobilenetv2\":\n config = get_yolo_config(\"yolov4\", num_anchors, num_classes, base_ops=base_ops)\n backbone = MobileNetV2(include_top=False, weights=None, input_tensor=inputs)\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"out_relu\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"out_relu\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block_6_expand_relu\").output,\n body.get_layer(\"block_13_expand_relu\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb4_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetLiteB4(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb0_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetB0(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb1_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb2_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = EfficientNetLiteB2(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(backbone.get_layer(\"top_activation\").output, base_ops=base_ops)\n body = 
Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetb0\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB0(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetb1\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetb1_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.spp = True\n backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_efficientnetliteb1\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n backbone = EfficientNetLiteB1(include_top=False, weights=None, input_tensor=inputs, activation=\"relu\")\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"top_activation\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"top_activation\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block4a_expand_activation\").output,\n body.get_layer(\"block6a_expand_activation\").output,\n body.output]\n elif architecture == \"yolov3_mobilenetv2_spp\":\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n config.agg_method = \"fpn\"\n backbone = MobileNetV2(include_top=False, weights=None, input_tensor=inputs)\n outputs = spatial_pyramid_block(\n backbone.get_layer(\"out_relu\").output, base_ops=base_ops) if config.spp else backbone.get_layer(\n \"out_relu\").output\n body = Model(inputs, outputs)\n features = [body.get_layer(\"block_6_expand_relu\").output,\n body.get_layer(\"block_13_expand_relu\").output,\n body.output]\n else:\n config = get_yolo_config(\"yolov3\", num_anchors, num_classes, base_ops=base_ops)\n # print(config)\n outputs = spatial_pyramid_block(darknet_body(inputs), base_ops=base_ops) if config.spp else darknet_body(inputs)\n body = Model(inputs, outputs)\n features = [body.layers[92].output, body.layers[152].output, body.output]\n pass\n # print(config.agg_method)\n if config.agg_method == \"panet\":\n new_features = pan_network(features, config)\n y1, y2, y3 = 
new_features[::-1]\n else:\n new_features = fpn_network(features, config)\n y1, y2, y3 = new_features[::-1]\n\n return Model(inputs, [y1, y2, y3])",
"def get_mobilenet_v3(model_name:str, pretrained=True, **kwargs) -> nn.Module:\n\n mbconfig = partial(MBConvConfig, depth_mult=1.0, width_mult=1.0, norm_layer=nn.BatchNorm2d,\n se_act2=partial(nn.Hardsigmoid, inplace=True), se_reduction_ratio=4, se_reduce_mode='adjust')\n\n if model_name == 'mobilenet_v3_large':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 1, 16, 16, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3, 5, 2, 24, 40, 1, act=nn.ReLU, use_se=True),\n mbconfig(3, 5, 1, 40, 40, 2, act=nn.ReLU, use_se=True),\n mbconfig(6, 3, 2, 40, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.5, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(2.3, 3, 1, 80, 80, 1, act=nn.Hardswish, use_se=False),\n mbconfig(6, 3, 1, 80, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 3, 1, 112, 112, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 112, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 160, 160, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1280\n elif model_name == 'mobilenet_v3_small':\n residual_config = [\n # expand k s in out layers act\n mbconfig(1, 3, 2, 16, 16, 1, act=nn.ReLU, use_se=True),\n mbconfig(4.5, 3, 2, 16, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(3.5, 3, 1, 24, 24, 1, act=nn.ReLU, use_se=False),\n mbconfig(4, 5, 2, 24, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 40, 40, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 40, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(3, 5, 1, 48, 48, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 2, 48, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n mbconfig(6, 5, 1, 96, 96, 1, act=nn.Hardswish, use_se=True),\n ]\n last_channel = 1024\n\n model = MobileNetV3(residual_config, last_channel=last_channel, block=MBConvSE, act_layer=nn.Hardswish, norm_layer=nn.BatchNorm2d)\n\n mobilenet_v2_init(model)\n\n if pretrained:\n load_from_zoo(model, model_name)\n\n return model",
"def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z",
"def L14_Net112(mode=\"train\"):\n data = mx.symbol.Variable(name=\"data\")\n landmark_target = mx.symbol.Variable(name=\"landmark_target\")\n landmark_vis = mx.symbol.Variable(name=\"landmark_vis\")\n \n # data = 112X112\n # conv1 = 56X56\n conv1 = Conv(data, num_filter=res_base_dim, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=\"conv1\")\n conv2 = Residual(conv1, num_block=1, num_out= res_base_dim, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim, name=\"res2\")\n \n\t#conv23 = 28X28\n conv23 = DResidual(conv2, num_out=res_base_dim*2, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*2, name=\"dconv23\")\n conv3 = Residual(conv23, num_block=2, num_out=res_base_dim*2, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*2, name=\"res3\")\n \n\t#conv34 = 14X14\n conv34 = DResidual(conv3, num_out=res_base_dim*4, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*4, name=\"dconv34\")\n conv4 = Residual(conv34, num_block=3, num_out=res_base_dim*4, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*4, name=\"res4\")\n \n\t#conv45 = 7X7\n conv45 = DResidual(conv4, num_out=res_base_dim*8, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*8, name=\"dconv45\")\n conv5 = Residual(conv45, num_block=2, num_out=res_base_dim*8, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*8, name=\"res5\")\n \n\t# conv6 = 1x1\n conv6 = Conv(conv5, num_filter=res_base_dim*8, kernel=(7, 7), pad=(0, 0), stride=(1, 1), name=\"conv6\")\n fc1 = Conv(conv6, num_filter=res_base_dim*16, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc1\")\n fc2 = Conv(fc1, num_filter=res_base_dim*32, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc2\")\t\n conv6_3 = mx.symbol.FullyConnected(data=fc2, num_hidden=42, name=\"conv6_3\")\t\n bn6_3 = mx.sym.BatchNorm(data=conv6_3, name='bn6_3', fix_gamma=False,momentum=0.9)\n\t\n if mode == \"test\":\n landmark_pred = bn6_3\n group = mx.symbol.Group([landmark_pred])\n else:\n \n out = mx.symbol.Custom(landmark_vis = landmark_vis, landmark_pred=bn6_3, landmark_target=landmark_target, \n op_type='negativemining_hand21', name=\"negative_mining\")\n group = mx.symbol.Group([out])\n \n return group",
"def yolo_forward(net, LABELS, image, confidence_level, save_image=False):\n\n # initialize a list of colors to represent each possible class label\n np.random.seed(42)\n colors = np.random.randint(0, 255, size=(10000, 3),\n dtype='uint8')\n\n # grab image spatial dimensions\n (H, W) = image.shape[:2]\n\n # determine only the *output* layer names that we need from YOLO\n ln = net.getLayerNames()\n ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n # construct a blob from the input image and then perform a forward\n # pass of the YOLO object detector, giving us our bounding boxes and\n # associated probabilities\n # also time it\n blob = cv2.dnn.blobFromImage(image, 1 / 255.0, (416, 416),\n swapRB=True, crop=False)\n net.setInput(blob)\n start = time.time()\n layer_outputs = net.forward(ln)\n end = time.time()\n\n # show timing information on YOLO\n print('[INFO] YOLO took {:.6f} seconds'.format(end - start))\n\n # initialize our lists of detected bounding boxes, confidences, and\n # class IDs, respectively\n boxes = []\n confidences = []\n class_ids = []\n\n # loop over each of the layer outputs\n for output in layer_outputs:\n # loop over each of the detections\n for detection in output:\n # extract the class ID and confidence (i.e., probability) of\n # the current object detection\n scores = detection[5:]\n class_id = np.argmax(scores)\n confidence = scores[class_id]\n\n # filter out weak predictions by ensuring the detected\n # probability is greater than the minimum probability\n if confidence > confidence_level:\n # scale the bounding box coordinates back relative to the\n # size of the image, keeping in mind that YOLO actually\n # returns the center (x, y)-coordinates of the bounding\n # box followed by the boxes' width and height\n box = detection[0:4] * np.array([W, H, W, H])\n (centerX, centerY, width, height) = box.astype('int')\n\n # use the center (x, y)-coordinates to derive the top and\n # and left corner of the bounding box\n x = int(centerX - (width / 2))\n y = int(centerY - (height / 2))\n\n # update our list of bounding box coordinates, confidences,\n # and class IDs\n boxes.append([x, y, int(width), int(height)])\n confidences.append(float(confidence))\n class_ids.append(class_id)\n\n # apply non-maxima suppression to suppress weak, overlapping bounding\n # boxes\n # idxs = cv2.dnn.NMSBoxes(boxes, confidences, confidence_level, threshold)\n\n print(class_ids)\n print(LABELS)\n # print(labels)\n\n labels = [LABELS[i] for i in class_ids]\n\n if save_image:\n yolo_save_img(image, class_ids, boxes, labels, confidences, colors, 'python_predictions.jpg')\n\n return class_ids, labels, boxes, confidences",
"def get_yolo_net(cfg_path, weight_path):\n\n if not cfg_path or not weight_path:\n raise Exception('missing inputs. See file.')\n\n print('[INFO] loading YOLO from disk...')\n net = cv2.dnn.readNetFromDarknet(cfg_path, weight_path)\n\n return net",
"def VLocNet_full(include_top=True, weights='imagenet', input_tensor=None, input_shape=None, classes=1000): # pooling=None,\n\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n\n # Determine proper input shape\n # input_shape = _obtain_input_shape(input_shape,\n # default_size=224,\n # min_size=197,\n # data_format=K.image_data_format(),\n # include_top=include_top)\n #\n # if input_tensor is None:\n # img_input = Input(shape=input_shape)\n # else:\n # if not K.is_keras_tensor(input_tensor):\n # img_input = Input(tensor=input_tensor, shape=input_shape)\n # else:\n # img_input = input_tensor\n # if K.image_data_format() == 'channels_last':\n # bn_axis = 3\n # else:\n # bn_axis = 1\n\n # 1st branch for the t-1 odometry regression\n input_odo_0 = Input(shape=(224, 224, 3), name='input_odo_0')\n\n odo_1_0 = ResNet_50_unit_1(input_tensor=input_odo_0, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t_p')\n\n odo_2_0 = ResNet_50_unit_2(input_tensor=odo_1_0, activation='elu', strides=(1, 1), branch='_odo_t_p')\n\n odo_3_0 = ResNet_50_unit_3(input_tensor=odo_2_0, activation='elu', branch='_odo_t_p')\n\n odo_4_0 = ResNet_50_unit_4(input_tensor=odo_3_0, activation='elu', branch='_odo_t_p')\n\n # 2nd branch for the t odometry regression\n input_odo_1 = Input(shape=(224, 224, 3), name='input_odo_1')\n\n odo_1_1 = ResNet_50_unit_1(input_tensor=input_odo_1, bn_axis=3, activation='elu', strides=(2, 2), branch='_odo_t')\n\n odo_2_1 = ResNet_50_unit_2(input_tensor=odo_1_1, activation='elu', strides=(1, 1), branch='_odo_t')\n\n odo_3_1 = ResNet_50_unit_3(input_tensor=odo_2_1, activation='elu', branch='_odo_t')\n\n odo_4_1 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_odo_t')\n\n # Concatenate the features from 1st and 2nd branches\n conca = concatenate([odo_4_0, odo_4_1], name='conca')\n\n odo_5 = ResNet_50_unit_5(input_tensor=conca, activation='elu', branch='_odo_all')\n\n # avg_pool = AveragePooling2D((7, 7), name='avg_pool')(odo_5)\n\n odo_glo_ave = GlobalAveragePooling2D()(odo_5)\n\n odo_fc_1 = Dense(1024, name='odo_fc_1')(odo_glo_ave)\n\n odo_fc_2 = Dense(3, name='odo_fc_2')(odo_fc_1)\n odo_fc_3 = Dense(4, name='odo_fc_3')(odo_fc_1)\n\n odo_merge = concatenate([odo_fc_2, odo_fc_3], name='odo_merge') # Modification\n\n # The network branch for the Pose part:\n\n pose_4 = ResNet_50_unit_4(input_tensor=odo_3_1, activation='elu', branch='_geo')\n\n pose_5 = ResNet_50_unit_5(input_tensor=pose_4, activation='elu', branch='_geo')\n\n pose_glo_ave = GlobalAveragePooling2D()(pose_5)\n\n pose_fc_1 = Dense(1024, name='pose_fc_1')(pose_glo_ave)\n\n pose_fc_2 = Dense(3, name='pose_fc_2')(pose_fc_1)\n pose_fc_3 = Dense(4, name='pose_fc_3')(pose_fc_1)\n\n pose_merge = concatenate([odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3], name='pose_merge') # Modification\n\n # Create model.\n # model = Model(input=[input_odo_0, input_odo_1], output=[odo_fc_2, odo_fc_3, pose_fc_2, pose_fc_3],\n # name='VLocNet_full')\n\n # changed the model from 4 outputs to 2 outputs\n model = Model(input=[input_odo_0, input_odo_1], output=[odo_merge, pose_merge], name='VLocNet_full')\n\n # load weights\n if weights == 'imagenet':\n if include_top:\n weights_path = 
get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n md5_hash='a7b3fe01876f51b976af0dea6bc144eb')\n else:\n weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n md5_hash='a268eb855778b3df3c7506639542a6af')\n model.load_weights(weights_path)\n if K.backend() == 'theano':\n layer_utils.convert_all_kernels_in_model(model)\n\n if K.image_data_format() == 'channels_first':\n if include_top:\n maxpool = model.get_layer(name='avg_pool')\n shape = maxpool.output_shape[1:]\n dense = model.get_layer(name='fc1000')\n layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')\n\n if K.backend() == 'tensorflow':\n warnings.warn('You are using the TensorFlow backend, yet you '\n 'are using the Theano '\n 'image data format convention '\n '(`image_data_format=\"channels_first\"`). '\n 'For best performance, set '\n '`image_data_format=\"channels_last\"` in '\n 'your Keras config '\n 'at ~/.keras/keras.json.')\n return model",
"def bc_train_nvidia():\n\timg_rows,img_cols = 64,64\n\tinput_shape = (img_rows,img_cols,3)\n\n\t# the model\t\n\tmodel = bc_nvidia_model(input_shape = input_shape)\n\n\t\n\timg_dim = (img_rows,img_cols)\n\n\t# reading the drivelog\t\n\tcsv_data = pd.read_csv(data_path+csv_path,usecols=[\"center\",\"left\",\"right\",\"steering\"])\n\n\tthreshold = 1\n\tbatch_size = 240\n\tepochs = 6\n\tyvals = []\n\n\tfor i in range(epochs):\n\t\tgen = generate_data_train(data_path,csv_data,img_dim,batch_size,threshold,yvals)\n\t\t\n\t\tmodel.fit_generator(gen, samples_per_epoch = 24000, nb_epoch = 1, verbose = 1)\n\n\t\t# thresholding against values close to 0 to balance the data\n\t\tthreshold = 1/(i+1)\n\t\n\t# serialize model to JSON\n\tmodel_json = model.to_json()\n\twith open(\"model.json\", \"w\") as json_file:\n\t json_file.write(model_json)\n\t# serialize weights to HDF5\n\tmodel.save_weights(\"model.h5\")\n\twith open(\"s_angles\",\"wb\") as y_file:\n\t\tpickle.dump(yvals,y_file)\n\treturn",
"def build_predictor(self):\n if self.library == \"yolov5\":\n self.predictor = torch.hub.load('ultralytics/yolov5', self.model_name)\n self.predictor.iou = self.nms_thresh # NMS IoU threshold (0-1)\n if self.library == \"detectron2\":\n cfg = get_cfg()\n cfg.merge_from_file(model_zoo.get_config_file(self.model_name))\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model\n cfg.MODEL.NMS_THRESH = self.nms_thresh # NMS IoU threshold\n if self.device == \"cpu\": cfg.MODEL.DEVICE = self.device\n # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(self.model_name)\n self.predictor = DefaultPredictor(cfg)"
] | [
"0.6369418",
"0.6306215",
"0.6232091",
"0.6182974",
"0.60732025",
"0.5921903",
"0.5916691",
"0.5855269",
"0.5853199",
"0.5839653",
"0.57973075",
"0.5778595",
"0.574271",
"0.573733",
"0.57194585",
"0.5616862",
"0.55473757",
"0.5541811",
"0.554155",
"0.5500795",
"0.5457074",
"0.5437526",
"0.5434107",
"0.54299426",
"0.5425723",
"0.5425672",
"0.5417012",
"0.53627396",
"0.5351046",
"0.5348419"
] | 0.6497924 | 0 |
The uri returned from request.uri is not properly urlencoded (sometimes it's partially urldecoded). This is a weird hack to get werkzeug to return the proper urlencoded string uri | def _get_uri_from_request(request):
uri = request.base_url
if request.query_string:
uri += '?' + request.query_string.decode('utf-8')
return uri | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _urlnorm(self, uri):\r\n (scheme, authority, path, query, fragment) = parse_uri(uri)\r\n if not scheme or not authority:\r\n raise Exception(\"Only absolute URIs are allowed. uri = %s\" % uri)\r\n authority = authority.lower()\r\n scheme = scheme.lower()\r\n if not path:\r\n path = \"/\"\r\n\r\n # Could do syntax based normalization of the URI before\r\n # computing the digest. See Section 6.2.2 of Std 66.\r\n request_uri = query and \"?\".join([path, query]) or path\r\n scheme = scheme.lower()\r\n defrag_uri = scheme + \"://\" + authority + request_uri\r\n\r\n return defrag_uri",
"def uri(self):\n parts = []\n # if I have a scheme\n if self.scheme: parts.append('{}:'.format(self.scheme))\n # if I have an authority\n if self.authority: parts.append('//{}'.format(self.authority))\n # if I have an address\n if self.address: parts.append('{}'.format(self.address))\n # if I have a query\n if self.query: parts.append('?{}'.format(self.query))\n # if I have a fragment\n if self.fragment: parts.append('#{}'.format(self.fragment))\n # assemble and return\n return ''.join(parts)",
"def _base_uri(self) -> str:\n if self.use_original_uri:\n header_value = self.use_original_uri.get(\"header_value\")\n conditions = self.use_original_uri.get(\"claim_conditions\")\n if conditions.get(\"any\"):\n uri = self.request.headers.get(header_value)\n else:\n key = self.claims.get(conditions.get(\"claim_key\"))\n val = self.claims.get(conditions.get(\"claim_value\"))\n if self.claims.get(key) == val:\n uri = self.request.headers.get(header_value)\n else:\n uri = self.request.uri\n else:\n uri = self.request.uri\n if not uri:\n uri = self.request.uri\n return uri.split(\"?\")[0]",
"def get_url():\n if os.environ['SERVER_PORT'] == '80':\n scheme = 'http://'\n else:\n scheme = 'https://'\n host = os.environ['SERVER_NAME']\n script_name = urllib.quote(os.environ.get('SCRIPT_NAME', ''))\n path_info = urllib.quote(os.environ.get('PATH_INFO', ''))\n qs = os.environ.get('QUERY_STRING', '')\n if qs:\n qs = '?' + qs\n return scheme + host + script_name + path_info + qs",
"def cleanUri(uri):\n if not uri.startswith(\"/\") and not uri.startswith('http'):\n uri = \"/\" + uri\n\n if 'http://' in uri or 'https://' in uri:\n uri = uri.split('://')[0] + '://' + \\\n uri.split('://')[1].replace(\"//\", \"/\")\n else:\n uri = uri.replace(\"//\", \"/\")\n\n if uri.endswith(\"/\"):\n uri = uri[:-1]\n\n return uri",
"def get_correct_url(request: flask.Request) -> str:\n\n parsed_url = urlparse(request.url_root)\n request_scheme = request.headers.get('X-Scheme')\n if request_scheme is not None:\n # use the same scheme that the request used\n return parsed_url._replace(scheme=request_scheme).geturl()\n elif parsed_url.scheme == \"http\" and \"localhost\" not in parsed_url.netloc:\n # if the request scheme is unknown use https unless we're referring\n # to localhost\n return parsed_url._replace(scheme=\"https\").geturl()\n else:\n # give up and don't make any changes\n return request.url_root",
"def __redirect_uri(self):\n uri = '%s://%s%s' % (request.scheme, request.hostname,\n request.path_info)\n if request.get_vars:\n uri += '?' + urlencode(request.get_vars)\n return uri",
"def quote_uri(uri):\n import urlparse\n import urllib\n\n up=urlparse.urlparse(uri)\n np=urllib.quote(up[2])\n return urlparse.urlunparse((up[0],up[1],np,up[3],up[4],up[5]))",
"def normalize_uri(uri):\n if isinstance(uri, str):\n uri = uri.decode('utf-8')\n return uri.strip().replace(u' ', u'_')",
"def test_unicode(self):\n iri = u'http://localhost/expos\\xe9?doppelg\\xe4nger=Bryan O\\u2019Sullivan#r\\xe9sum\\xe9'\n uri = b'http://localhost/expos%C3%A9?doppelg%C3%A4nger=Bryan%20O%E2%80%99Sullivan#r%C3%A9sum%C3%A9'\n self.assertEqual(flatten(url.URL.fromString(iri)), uri)",
"def full_url(self):\n return \"%s://%s%s\" % (self.protocol, self.host, self.uri)",
"def raw_url(self) -> str:\n return self.url_as(raw=True)",
"def uri(self, path):\n path = ensure_slash(path)\n return 'http://127.0.0.1:%d%s' % (self.port, path)",
"def __str__(self):\r\n self.query = urllib.urlencode(self.args)\r\n self.query = urllib.unquote(self.query)\r\n return urlparse.urlunparse((self.scheme, self.netloc, self.path, self.params, self.query, self.fragment))",
"def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"",
"def iri2uri(uri): \r\n if isinstance(uri ,unicode):\r\n (scheme, authority, path, query, fragment) = urlparse.urlsplit(uri)\r\n authority = authority.encode('idna')\r\n # For each character in 'ucschar' or 'iprivate'\r\n # 1. encode as utf-8\r\n # 2. then %-encode each octet of that utf-8 \r\n uri = urlparse.urlunsplit((scheme, authority, path, query, fragment))\r\n uri = \"\".join([encode(c) for c in uri])\r\n return uri",
"def get_url(self, uri):\n # TODO make this a prepend_if_needed type method\n return urllib.parse.urljoin(self.hostname, uri)",
"def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])",
"def build_url(app, request):\n return '%s%s' % (app.url_root, request.path[1:])",
"def get_uri(self):\n if self._uri is None:\n self._uri = \"{0}{1}/{2}\".format(\n self.session.resource_prefix,\n self.base_uri,\n self.ip_or_ifname_or_group_name,\n )\n\n return self._uri",
"def to_url(request):\r\n scheme, netloc, path, query, fragment = urlsplit(to_utf8(request.url))\r\n query = parse_qs(query)\r\n\r\n for key, value in request.data_and_params.iteritems():\r\n query.setdefault(key, []).append(value)\r\n\r\n query = urllib.urlencode(query, True)\r\n return urlunsplit((scheme, netloc, path, query, fragment))",
"def full_uri(path):\n protocol = 'https' if settings.USE_HTTPS else 'http'\n domain = Site.objects.get_current().domain\n return \"{}://{}{}\".format(protocol, domain, path)",
"def _encode_url(full_url):\n return urllib.parse.quote(full_url, safe=\"%/:=&?~#+!$,;'@()*[]|\")",
"def urlsafe(self):\n # This is 3-4x faster than urlsafe_b64decode()\n urlsafe = base64.b64encode(self.reference().Encode())\n return urlsafe.rstrip('=').replace('+', '-').replace('/', '_')",
"def test_unicode_query_string():\n assert (normalize_url(\"http://example.com/?file=résumé.pdf\") ==\n \"http://example.com/?file=r%C3%A9sum%C3%A9.pdf\")",
"def get_full_url(self, url):\n param_str = self.request.GET.urlencode()\n request_url = u'%s%s' % (self.base_url, url)\n request_url += '?%s' % param_str if param_str else ''\n return request_url",
"def __str__(self):\n if self._str is None:\n # special cases\n if self == URI.INVALID():\n self._str = \"[invalid]\"\n elif self == URI.EMPTY():\n self._str = \"\"\n elif self == URI.INLINE():\n self._str = \"[inline]\"\n elif self == URI.EVAL():\n self._str = \"[eval]\"\n elif not self._isEmpty(self._scheme) and self._isEmpty(self._host) and self._isEmpty(self._port) and self._isEmpty(self._path) and self._isEmpty(self._query):\n self._str = self._scheme + \":\"\n else:\n self._str = \"\"\n if self._scheme in defaults.schemesWithNoDoubleSlash:\n self._str += self._scheme + \":\"\n elif self._scheme is not None:\n self._str += self._scheme + \"://\"\n \n self._str += self._host\n \n if self._port is not None:\n self._str += \":\" + str(self._port)\n \n if self._path is not None:\n self._str += urllib.quote(self._path.encode('utf8')).decode('ascii')\n \n if self._query is not None:\n self._str += \"?\" + self._query\n return self._str",
"def uri_string(self):\n if isinstance(self.entity, int):\n uri_string = \"{{{0}}}\".format(self.entity)\n elif isinstance(self.entity, NodePointer):\n uri_string = \"{{{0}}}\".format(self.entity.address)\n else:\n try:\n uri_string = self.entity.ref\n except AttributeError:\n uri_string = ustr(self.entity)\n if self.segments:\n if not uri_string.endswith(\"/\"):\n uri_string += \"/\"\n uri_string += \"/\".join(map(percent_encode, self.segments))\n return uri_string",
"def _full_url(url, _params={}):\n\n # Support for unicode domain names and paths.\n scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)\n\n if not scheme:\n raise ValueError(\"Invalid URL %r: No schema supplied\" % url)\n\n netloc = netloc.encode('idna')\n\n if isinstance(path, unicode):\n path = path.encode('utf-8')\n\n path = requote_path(path)\n\n url = str(urlparse.urlunparse([scheme, netloc, path, params, query,\n fragment]))\n\n if _params:\n if urlparse.urlparse(url).query:\n return '%s&%s' % (url, _params)\n else:\n return '%s?%s' % (url, _params)\n else:\n return url",
"def getQualifiedURL(uri = None):\n schema, stdport = ('http', '80')\n host = os.environ.get('HTTP_HOST')\n if not host:\n host = os.environ.get('SERVER_NAME')\n port = os.environ.get('SERVER_PORT', '80')\n if port != stdport: host = host + \":\" + port\n result = \"%s://%s\" % (schema, host)\n if uri: result = result + uri\n return result"
] | [
"0.67314285",
"0.65002495",
"0.6443756",
"0.6264452",
"0.6258882",
"0.6235578",
"0.6230169",
"0.6225036",
"0.61642367",
"0.6159104",
"0.61166435",
"0.6091703",
"0.60887986",
"0.6040494",
"0.602881",
"0.6023126",
"0.60162497",
"0.6009078",
"0.6009078",
"0.5996785",
"0.5991015",
"0.5989411",
"0.59613985",
"0.5939465",
"0.59283805",
"0.59275925",
"0.59133846",
"0.5911823",
"0.59070665",
"0.58872634"
] | 0.68531525 | 0 |
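The record above documents a workaround for getting a consistently percent-encoded URI out of a Werkzeug/Flask request. As a rough sketch of the same idea — assuming a Werkzeug `request` object is in scope; the helper name and the `safe` character set are illustrative and not taken from the dataset:

from urllib.parse import quote

def rebuild_encoded_uri(request):
    # base_url comes back from Werkzeug as a str; re-quote it defensively so
    # any stray unencoded characters are percent-encoded, while reserved URI
    # characters are left untouched.
    uri = quote(request.base_url, safe=":/%?#[]@!$&'()*+,;=")
    if request.query_string:
        # query_string holds the raw query bytes exactly as sent by the
        # client, so append it without another decode/encode round trip.
        uri += "?" + request.query_string.decode("utf-8")
    return uri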
Visualize a particular column of Y_pred and Y_test for a particular series | def visualize_pred(y_test, y_pred, test_seq, window_out, num_plots, num_win_ser, cols_y, col_idx):
ser_idx = [i for i in range(0, len(y_test), num_win_ser)]
if num_plots > len(ser_idx):
print("Too many plots, reduce the mumber")
else:
indx = ser_idx[0:num_plots]
days = range(num_win_ser)
for idx in indx:
CR = test_seq[idx][0][0][3]
pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]
true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]
plt.title("Y_True V/S Y_Pred, CR: "+ str(CR))
plt.xlabel('Days')
plt.ylabel(cols_y[col_idx])
plt.plot(days, pred, label = 'Pred')
plt.plot(days, true, label = 'True')
plt.legend()
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def actual_pred_plot(preds):\r\n actual_pred = pd.DataFrame(columns=['Cost', 'prediction'])\r\n actual_pred['Cost'] = all_data['2020':].iloc[:, -1][1:len(preds) + 1]\r\n actual_pred['prediction'] = preds[:, -1]\r\n\r\n from keras.metrics import MeanSquaredError\r\n m = MeanSquaredError()\r\n m.update_state(np.array(actual_pred['Cost']), np.array(actual_pred['prediction']))\r\n\r\n return m.result().numpy(), actual_pred.plot()",
"def plot_observations():\n plt.plot(history.history['loss'], label='training_loss')\n plt.plot(history.history['val_loss'], label='val_loss ')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.show()\n\n plt.plot(history.history['acc'], label='accuracy')\n plt.plot(history.history['val_acc'], label='val_accuracy')\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n plt.legend(loc='lower right')\n plt.show()\n\n test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)\n print(\"Test Accuracy:\", test_acc)",
"def visualize_data(y_test, x_test, window_out, num_plots, num_win_ser, cols_y, col_idx):\n \n \n ser_idx = [i for i in range(0, len(y_test), num_win_ser)]\n if num_plots > len(ser_idx):\n print(\"Too many plots, reduce the mumber\")\n else:\n indx = ser_idx[0:num_plots]\n days = range(num_win_ser)\n for idx in indx:\n CR = x_test[idx][0][3]\n #pred = y_pred[idx : idx+num_win_ser, window_out -1, col_idx]\n true = y_test[idx : idx+num_win_ser, window_out -1, col_idx]\n \n plt.title(\"Y_True, CR: \"+ str(CR))\n plt.xlabel('Days')\n plt.ylabel(cols_y[col_idx])\n \n #plt.plot(days, pred, label = 'Pred')\n plt.plot(days, true, label = 'True')\n \n plt.legend()\n plt.show()",
"def plot_actual_vs_predicted_by_equations(df, x_variable, y_variables, plot_title):\n #Plot results\n df.plot(x=x_variable, y=y_variables, title=plot_title)\n plt.show()",
"def plotly_table():\n model_data = your_choice()\n model_data[\"test_prediction\"] = list(model_data[\"test_prediction\"])\n \n df = pd.DataFrame(model_data[\"test_prediction\"], columns=[\"test_prediction\"])\n for k,v in model_data.items():\n if k != \"test_prediction\":\n df[k] = str(v)\n\n fig = a_libraries.plotly_table(df)\n\n return fig",
"def test_sarima_model(y, y_test, results, **kwargs):\n \n # Get predictions\n pred = results.get_prediction(start=y_test.index.min(), end=y_test.index.max(), **kwargs)\n y_pred = pred.predicted_mean\n pred_ci = pred.conf_int()\n\n # Calculate some metrics and print them out\n rmse = ((y_pred - y_test) ** 2).mean() ** 0.5\n print('Root Mean Squared Error =', rmse)\n \n r2 = r2_score(y_pred, y_test)\n print('R^2 =', r2)\n \n # Graph\n ax = y.plot(label='observed')\n y_pred.plot(ax=ax, label='predicted', alpha=.7, figsize=(15, 8))\n ax.fill_between(pred_ci.index,\n pred_ci.iloc[:, 0],\n pred_ci.iloc[:, 1], color='k', alpha=.2)\n plt.title('Average Monthly Temperature: Observed vs. Predicted')\n ax.set_xlabel('Date')\n ax.set_ylabel('Temperature')\n plt.legend()\n plt.show()",
"def plot_test(y_test, y_pred, title = None, xlabel = 'Measured $Y = \\log_2(MIC)$', ylabel = 'Predicted $Y = \\log_2(MIC)$', legend = ['Ideal', 'Result'], groups = None):\n \n fig, ax = plt.subplots(1,1)\n fig.set_figheight(5)\n fig.set_figwidth(5)\n if groups is not None:\n groups_obj = pd.concat([y_test,y_pred], axis=1).groupby(np.array(groups))\n cmap=plt.get_cmap('tab10')\n for name, group in groups_obj:\n # Works only for groups with numeric names that are max cmap length:\n ax.plot(group.iloc[:,0], group.iloc[:,1], marker=\"o\", linestyle=\"\", label=int(name), color = cmap.colors[int(name)])\n ax.legend()\n else:\n ax.scatter(y_test,y_pred, color = 'red')\n ax_max = 10\n if np.max(y_test.values)>ax_max:\n ax_max = np.max(y_test).values\n ax_min = 0\n if np.min(y_test.values)<ax_min:\n ax_min = np.min(y_test.values)\n ax.plot([ax_min, ax_max], [ax_min, ax_max], '--', color='black')\n ax.set_aspect('equal', 'box')\n plt.title(title)\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n #plt.savefig(title+'.pdf')\n plt.savefig(title+'.svg')\n #plt.savefig(title+'.png')#, dpi=600)\n #plt.show()",
"def visualize_test(test_data_full, test_data, thetas):\n fig, ax = plt.subplots()\n ax.scatter(test_data_full[\"Weight\"], test_data_full[\"Height\"], color='blue')\n ax.plot(test_data_full[\"Weight\"], predict(test_data, thetas[-1]), color='red', linewidth=2)\n return fig",
"def plot_pred(y, yhat, name, output_dir):\n ax = pd.DataFrame(y, columns=[\"y%s\" % LOOK_AHEAD]).plot(figsize=(15, 10))\n pd.DataFrame(yhat, columns=[\"yhat%s\" % LOOK_AHEAD]).plot(ax=ax)\n plt.title(\"%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}.png\")\n\n pd.DataFrame(y-yhat, columns=[f\"yhat {LOOK_AHEAD}\"]).plot(figsize=(15, 10))\n plt.title(\"diff-%s\" % name)\n plt.tight_layout()\n plt.savefig(f\"{output_dir / name}-diff.png\")",
"def plot_scatter(df):\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values\": fig})\n\n # Poor Results\n df = df.query(\"mae > 2\")\n fig = px.scatter(df, x=\"preds\", y=\"truth\", title=\"Predictions vs True Values\", color=\"mae\")\n wandb.log({f\"Predictions vs True Values [mae > 2]\": fig})",
"def show_score(clf, X_test, y_test):\n y_pred = predict(clf, X_test)\n print metrics.classification_report(y_test.astype(np.int), y_pred)",
"def plot_results(actual_time_series, predicted_values, len_train_data,\n y_name='Parameter'):\n\n plt.plot(np.arange(0, len(actual_time_series)),\n actual_time_series, label='Actual values', c='green')\n plt.plot(np.arange(len_train_data, len_train_data + len(predicted_values)),\n predicted_values, label='Predicted', c='blue')\n # Plot black line which divide our array into train and test\n plt.plot([len_train_data, len_train_data],\n [min(actual_time_series), max(actual_time_series)], c='black',\n linewidth=1)\n plt.ylabel(y_name, fontsize=15)\n plt.xlabel('Time index', fontsize=15)\n plt.legend(fontsize=15)\n plt.grid()\n plt.show()",
"def regression_analysis(cls, y_true, y_pred, path=None):\n residual = y_true - y_pred\n print(\"Histogram\")\n cls.histogram(residual, \"Residual\")\n print(\"Scatter\")\n cls.scatter_plot(y_pred, residual, \"pred\", \"residual\", path=path)\n print(\"Scatter\")\n cls.scatter_plot( y_true, y_pred, \"y_test\", \"pred\", path=path)",
"def evaluate(self, y_pred, y_test):\n for i in range(len(y_pred)):\n if y_pred[i] == y_test.iloc[i]:\n self.accuracy += 1\n self.accuracy = (self.accuracy/len(y_pred))",
"def evaluate_model(model, X_test, Y_test, category_names):\n \n \n yPredictorTest = model.predict(X_test)\n \n for idx, col in enumerate(Y_test):\n print(col, classification_report(Y_test[col], yPredictorTest[:, idx]))",
"def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1",
"def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()",
"def plot_result(data, gt_y, pred_y):\n assert data.shape[0] == gt_y.shape[0]\n assert data.shape[0] == pred_y.shape[0]\n\n plt.figure()\n\n plt.subplot(1, 2, 1)\n plt.title('Ground Truth', fontsize=18)\n\n for idx in range(data.shape[0]):\n if gt_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.subplot(1, 2, 2)\n plt.title('Prediction', fontsize=18)\n\n for idx in range(data.shape[0]):\n if pred_y[idx] == 0:\n plt.plot(data[idx][0], data[idx][1], 'ro')\n else:\n plt.plot(data[idx][0], data[idx][1], 'bo')\n\n plt.show()",
"def make_results_plot( df, k, reg ):\n\tuid = smalldf['user_id'].values\n\tbid = smalldf['business_id'].values\n\tactual = smalldf['stars'].values\n\tpredicted = np.zeros( len(actual) )\n\tcounter = 0\n\tfor biz_id, user_id in izip( bid, uid ):\n\t\tpredicted[counter] = rating( biz_id, user_id, k = k, reg = reg ) \n\t\tcounter = counter + 1\n\t# compare_results( actual, predicted )",
"def report(self, X, y):\n predict = self.model.predict(X)\n\n skplt.estimators.plot_feature_importances(\n self.model, x_tick_rotation=90)\n plt.show()\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=predict)\n lims = [\n np.min([ax.get_xlim(), ax.get_ylim()]), # min of both axes\n np.max([ax.get_xlim(), ax.get_ylim()]), # max of both axes\n ]\n ax.plot(lims, lims, 'k-', alpha=0.75, zorder=0)\n ax.set_aspect('equal')\n ax.set_xlim(lims)\n ax.set_ylim(lims)\n ax.set_xlabel(\"Observed\")\n ax.set_ylabel(\"Predict\")\n ax.set_title(\"Predict vs. Observed\")\n plt.show()\n\n residuals = y - predict\n\n fig, ax = plt.subplots(figsize=(7, 7))\n sns.scatterplot(x=y, y=residuals)\n plt.title(\"Residuals vs. Observed\")\n plt.xlabel(\"Obserbed\")\n plt.ylabel(\"Residuals\")\n plt.show()\n\n plt.hist(residuals)\n plt.title(\"Residuals distribution\")\n plt.xlabel(\"Residuals value\")\n plt.ylabel(\"Count\")\n plt.show()\n\n display(\n pd.DataFrame({\n \"explained_variance_score\":\n metrics.explained_variance_score(y, predict),\n \"mean_absolute_error\":\n metrics.mean_absolute_error(y, predict),\n \"mean_squared_log_error\":\n metrics.mean_squared_log_error(y, predict),\n \"median_absolute_error\":\n metrics.median_absolute_error(y, predict),\n \"r2_score\":\n metrics.r2_score(y, predict)\n },\n index=[0]))",
"def _graph_results(self, X_test, y_test, y_pred):\n if self.regression is None:\n print(\"Regression results aren't available. Have you run linear_regression() yet?\")\n return\n\n if self.attributes.shape[1] > 1:\n print(\"Graphing is supported for one feature only.\")\n return\n\n plt.scatter(X_test, y_test, color=\"black\")\n plt.plot(X_test, y_pred, color=\"blue\", linewidth=3)\n plt.xticks(())\n plt.yticks(())\n plt.show()",
"def evaluate_random_forest(y_test, y_pred):",
"def evaluate(self):\n predictions = self.model.predict(self.test[0])\n accuracy = accuracy_score(self.test[1], predictions)\n print(\"Accuracy:\", str(accuracy * 100) + \"%\")\n self.plot_results(predictions)",
"def illustrate_prediction(model, test_data, test_target):\n selects = np.random.random_integers(0, len(test_data), 16)\n labels = test_target[selects]\n predicts = model.predict(test_data[selects])\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(test_data[selects[k]])\n if predicts[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')\n\n if predicts[k] != labels[k]:\n plt.plot([0, 24], [0, 24], 'r', linewidth=2)\n plt.plot([0, 24], [24, 0], 'r', linewidth=2)",
"def rmse(y_true, y_pred): # -> Any:\n ...",
"def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass",
"def _plot_good_pred_whitout_reject(self, test: Set, title=None, fig_size=None):\r\n if fig_size is not None:\r\n fig = plt.figure(figsize=fig_size)\r\n else:\r\n fig = plt.figure()\r\n ax = fig.add_subplot(111)\r\n goodclassified_index = []\r\n for idx_preds in range(self.preds.shape[1]):\r\n new_good_index = []\r\n for idx in range(self.preds.shape[0]):\r\n if test.labels[idx] == self.preds[idx, idx_preds]:\r\n new_good_index.append(idx)\r\n if new_good_index:\r\n ax.scatter(test.features[new_good_index, 0], self.features[new_good_index, 1],\r\n label='Good classified top{0}'.format(int(idx_preds + 1)))\r\n goodclassified_index += new_good_index\r\n misclassified = [idx for idx in range(self.preds.shape[0]) if idx not in goodclassified_index]\r\n if misclassified:\r\n ax.scatter(test.features[misclassified, 0], test.features[misclassified, 1],\r\n label='Misclassified', marker='x', c='red')\r\n box = ax.get_position()\r\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\r\n # Put a legend to the right of the current axis\r\n ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n if title is not None:\r\n ax.set_title(title)\r\n plt.show()",
"def parity_plot(y_pred, y_act):\n\n fig = plt.figure(figsize=FIG_SIZE)\n plt.scatter(y_act, y_pred)\n plt.plot([y_act.min(), y_act.max()], [y_act.min(), y_act.max()],\n lw=4, color='r')\n plt.xlabel('Actual')\n plt.ylabel('Predicted')\n\n return fig",
"def _plot_experiment(df, axes, metric_name, isTrain):\n # colors: https://stackoverflow.com/questions/42086276/get-default-line-colour-cycle\n ldf = metric_short_to_long(df)\n plotted = \"Train\" if isTrain else \"Val\"\n m = ldf.query(\"stat == 'mse' and metric == @metric_name\")[[\"trial\",\"state\",\"value\"]].rename({\"value\":\"mse\"},axis=1)\n # aggregated\n ax = sns.barplot(x=\"trial\", y=\"mse\", data=m, palette=[u'#1f77b4'], ci=\"sd\", ax=axes[0])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"Aggregated State Errors ({plotted})\")\n ax.set_xlabel(\"Trial Number\")\n\n # individual state plots\n ax = sns.barplot(x=\"trial\", y=\"mse\", hue=\"state\",data=m, ci=\"sd\", ax=axes[1])\n ax.set_ylabel(\"MSE (log)\")\n ax.set_yscale(\"log\")\n ax.set_title(f\"State Error by Trial ({plotted})\")\n ax.set_xlabel(\"Trial Number\")",
"def auto_evaluation(model,x_train,y_train,x_test,y_test):\n\n y_train_prediction=model.predict(x_train)\n y_test_prediction=model.predict(x_test)\n\n plt.scatter(y_train,y_train_prediction,c=\"b\",s=1,alpha=0.5)\n plt.scatter(y_test,y_test_prediction,c=\"r\",s=2,alpha=0.5)\n plt.xlabel(\"actual\")\n plt.ylabel(\"predicted\")\n\n print(\"tr R2: {:.2f}\".format(r2_score(y_train_prediction,y_train)))\n print(\"te R2: {:.2f}\".format(r2_score(y_test_prediction,y_test))) \n \n return y_train_prediction,y_test_prediction"
] | [
"0.6575989",
"0.65215456",
"0.6404881",
"0.62492937",
"0.6163107",
"0.61196136",
"0.60816854",
"0.6080583",
"0.6039545",
"0.6038659",
"0.6025292",
"0.60227543",
"0.6006382",
"0.59930116",
"0.5945069",
"0.5939615",
"0.5928846",
"0.5928846",
"0.5887865",
"0.5881625",
"0.58782905",
"0.58576834",
"0.5856805",
"0.5844696",
"0.5840252",
"0.58336306",
"0.5809927",
"0.5777172",
"0.5773653",
"0.57705843"
] | 0.6889194 | 0 |
Test the AioBaseTurtle._calc_move function | def test_calc_move(self):
t = AioBaseTurtle()
t.speed(speed=5)
steps, delta = t._calc_move(Vec2D(0, 100))
self.assertEqual(steps, 20)
self.assertAlmostEqual(delta[0], 0.0)
self.assertAlmostEqual(delta[1], 5.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_move_step(self):\n t = AioBaseTurtle()\n t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))\n self.assertAlmostEqual(t._position[0], 100)\n self.assertAlmostEqual(t._position[1], 100)\n t.screen._drawline.assert_called_once_with(\n t.currentLineItem,\n ((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position\n \"black\",\n 1,\n False\n )\n self.mock_update.assert_called_once_with()",
"def DoMove(position, move):\n return position - move",
"def test_set_position_after_travel(self):\n travelcalculator = TravelCalculator(25, 50)\n travelcalculator.start_travel(30)\n travelcalculator.set_position(80)\n assert travelcalculator.position_reached()\n assert travelcalculator.current_position() == 80",
"def move():\n Robot.move()",
"def test_change_direction(self):\n travelcalculator = TravelCalculator(50, 25)\n with patch(\"time.time\") as mock_time:\n mock_time.return_value = 1580000000.0\n travelcalculator.set_position(60)\n travelcalculator.start_travel(80)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_DOWN\n\n # change direction after two seconds\n mock_time.return_value = 1580000002.0\n assert travelcalculator.current_position() == 64\n travelcalculator.start_travel(48)\n assert travelcalculator.travel_direction == TravelStatus.DIRECTION_UP\n\n assert travelcalculator.current_position() == 64\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000004.0\n assert travelcalculator.current_position() == 56\n assert not travelcalculator.position_reached()\n\n mock_time.return_value = 1580000006.0\n assert travelcalculator.current_position() == 48\n assert travelcalculator.position_reached()",
"def movement(self):",
"def test_get_move_interface(self):\n h, w = 9, 9 # board size\n test_depth = 1\n starting_location = (2, 7)\n adversary_location = (0, 0) # top left corner\n iterative_search = False\n search_method = \"minimax\"\n heuristic = lambda g, p: 0. # return 0 everywhere\n\n # create a player agent & a game board\n agentUT = game_agent.CustomPlayer(\n test_depth, heuristic, iterative_search, search_method)\n\n # Test that get_move returns a legal choice on an empty game board\n board = isolation.Board(agentUT, 'null_agent', w, h)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on an \" +\n \"empty board. It should return coordinates on the \" +\n \"game board for the location of the agent's next \" +\n \"move. The move must be one of the legal moves on \" +\n \"the current game board.\"))\n\n # Test that get_move returns a legal choice for first move as player 2\n board = isolation.Board('null_agent', agentUT, w, h)\n board.apply_move(starting_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed making the first \" +\n \"move as player 2 on a new board. It should return \" +\n \"coordinates on the game board for the location \" +\n \"of the agent's next move. The move must be one \" +\n \"of the legal moves on the current game board.\"))\n\n # Test that get_move returns a legal choice after first move\n board = isolation.Board(agentUT, 'null_agent', w, h)\n board.apply_move(starting_location)\n board.apply_move(adversary_location)\n legal_moves = board.get_legal_moves()\n move = agentUT.get_move(board, legal_moves, lambda: 99)\n self.assertIn(move, legal_moves,\n (\"The get_move() function failed as player 1 on a \" +\n \"game in progress. It should return coordinates on\" +\n \"the game board for the location of the agent's \" +\n \"next move. The move must be one of the legal moves \" +\n \"on the current game board.\"))",
"def move(x,y):\r\n pass",
"def test_move_straight(controller):\n pos, angle = controller.odometry(20, 20, Vector2(0, 0), 0)\n assert pos == Vector2(\n 2 * math.pi * WHEEL_RADIUS * 10 / TICK_PER_REVOLUTION,\n 0,\n )\n assert angle == 0\n\n # Move backward in a straight line.\n pos, angle = controller.odometry(10, 10, Vector2(0, 0), math.pi / 2)\n assert pos.x < 1e-10\n assert pos.y == -2 * math.pi * WHEEL_RADIUS * 10 / TICK_PER_REVOLUTION\n assert angle == math.pi / 2",
"def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)",
"def test_maze_move_1(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.UP, a2.NO_CHANGE), False)",
"def test_verify_move(self):\n self._verify([self.applied_commands['move']])",
"def step(self, move):",
"def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates",
"def test_get_move(self):\n\n class DynamicTimer():\n \"\"\"Dynamic Timer allows the time limit to be changed after the\n timer is initialized so that the search timeout can be triggered\n before the timer actually expires. This allows the timer to expire\n when an event occurs, regardless of the clock time required until\n the event happens.\n \"\"\"\n def __init__(self, time_limit):\n self.time_limit = time_limit\n self.start_time = curr_time_millis()\n\n def time_left(self):\n return self.time_limit - (curr_time_millis() - self.start_time)\n\n w, h = 11, 11 # board size\n adversary_location = (0, 0)\n method = \"minimax\"\n\n # The agent under test starts at the positions indicated below, and\n # performs an iterative deepening minimax search (minimax is easier to\n # test because it always visits all nodes in the game tree at every\n # level).\n origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]\n exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]\n\n for idx in range(len(origins)):\n\n # set the initial timer high enough that the search will not\n # timeout before triggering the dynamic timer to halt by visiting\n # the expected number of nodes\n time_limit = 1e4\n timer = DynamicTimer(time_limit)\n eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)\n agentUT, board = self.initAUT(-1, eval_fn, True, method,\n origins[idx], adversary_location,\n w, h)\n legal_moves = board.get_legal_moves()\n chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)\n\n diff_total = abs(board.counts[0] - exact_counts[idx][0])\n diff_unique = abs(board.counts[1] - exact_counts[idx][1])\n\n self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)\n\n self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(\n legal_moves, chosen_move))",
"def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_sphere_sampler.move(std_gcmc_sphere_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_sphere_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_sphere_sampler.n_accepted <= n_moves\n assert len(std_gcmc_sphere_sampler.Ns) == n_moves\n assert len(std_gcmc_sphere_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_sphere_sampler.energy, Quantity)\n assert std_gcmc_sphere_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None",
"def test_maze_move_3(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.NO_CHANGE), True)",
"def move(self, move):\n raise NotImplementedError()",
"def test_maze_move_2(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n\n self.assertEqual(maze.move(rat_J, a2.DOWN, a2.RIGHT), False)",
"def test_move(self):\n # Run a handful of GCMC moves\n n_moves = 10\n std_gcmc_system_sampler.move(std_gcmc_system_simulation.context, n_moves)\n\n # Check that all of the appropriate variables seem to have been updated\n # Hard to test individual moves as they are rarely accepted - just need to check the overall behaviour\n assert std_gcmc_system_sampler.n_moves == n_moves\n assert 0 <= std_gcmc_system_sampler.n_accepted <= n_moves\n assert len(std_gcmc_system_sampler.Ns) == n_moves\n assert len(std_gcmc_system_sampler.acceptance_probabilities) == n_moves\n assert isinstance(std_gcmc_system_sampler.energy, Quantity)\n assert std_gcmc_system_sampler.energy.unit.is_compatible(kilocalories_per_mole)\n\n return None",
"def choose_move(self):\n return 0",
"def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)",
"def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement",
"def testMove(intCurrentLeftPin, intCurrentRightPin, fltXCurrent, fltYCurrent,\r\n fltXTestDistance, fltYTestDistance):\r\n fltXNew = fltXCurrent + fltXTestDistance\r\n fltYNew = fltYCurrent + fltYTestDistance\r\n \r\n printMovement(fltXCurrent, fltYCurrent, fltXNew, fltYNew)\r\n \r\n # Calculate the operations required to move the drawing point.\r\n lsOperations = calculatePath(fltXCurrent, fltYCurrent, fltXNew, fltYNew)\r\n \r\n # Execute the operations.\r\n tpCurrentState = executeOperations(lsOperations, intCurrentLeftPin,\r\n intCurrentRightPin,\r\n fltXCurrent, fltYCurrent)\r\n \r\n (intCurrentLeftPin, intCurrentRightPin,\r\n fltXCurrent, fltYCurrent) = tpCurrentState\r\n \r\n return (intCurrentLeftPin, intCurrentRightPin, fltXCurrent, fltYCurrent)",
"def test_move(self):\n neq_gcmc_sphere_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_sphere_sampler.move(neq_gcmc_sphere_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_sphere_sampler.n_moves == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_accepted <= 1\n assert len(neq_gcmc_sphere_sampler.Ns) == 1\n assert len(neq_gcmc_sphere_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_sphere_sampler.velocities, Quantity)\n assert neq_gcmc_sphere_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_sphere_sampler.insert_works) + len(neq_gcmc_sphere_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_sphere_sampler.n_left_sphere <= 1\n assert 0 <= neq_gcmc_sphere_sampler.n_explosions <= 1\n\n return None",
"def test_move(self):\n neq_gcmc_system_sampler.reset()\n\n # Just run one move, as they are a bit more expensive\n neq_gcmc_system_sampler.move(neq_gcmc_system_simulation.context, 1)\n\n # Check some of the variables have been updated as appropriate\n assert neq_gcmc_system_sampler.n_moves == 1\n assert 0 <= neq_gcmc_system_sampler.n_accepted <= 1\n assert len(neq_gcmc_system_sampler.Ns) == 1\n assert len(neq_gcmc_system_sampler.acceptance_probabilities) == 1\n\n # Check the NCMC-specific variables\n assert isinstance(neq_gcmc_system_sampler.velocities, Quantity)\n assert neq_gcmc_system_sampler.velocities.unit.is_compatible(nanometers/picosecond)\n assert len(neq_gcmc_system_sampler.insert_works) + len(neq_gcmc_system_sampler.delete_works) == 1\n assert 0 <= neq_gcmc_system_sampler.n_explosions <= 1\n\n return None",
"def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path",
"def test_move(self):\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game.move(row, col, PLAYERX)\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game.move(row, col, PLAYERO)\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n self.game._board[row][col] = PLAYERO\n self.game.move(row, col, PLAYERO)\n self.assertEqual(self.game.get_square(row, col), PLAYERO)",
"def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32",
"def move(self, direction, step):\n for i in range(1, step + 1):\n y, x = self.robot_position\n if direction == \"N\" and y > 0:\n if self.carte[y - 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y - 1, x)\n elif direction == \"S\" and y <= self.height:\n if self.carte[y + 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y + 1, x)\n elif direction == \"E\" and x <= self.width+1:\n if self.carte[y][x + 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x + 1)\n elif direction == \"O\" and x > 0:\n if self.carte[y][x - 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x - 1)\n\n if self.robot_position == self.out_position:\n print(\"Bravo vous avez fini\")\n return True\n\n return False"
] | [
"0.77467954",
"0.67991954",
"0.67526376",
"0.6682421",
"0.6611246",
"0.66024905",
"0.65450394",
"0.65094215",
"0.6458481",
"0.6436995",
"0.6424645",
"0.64221406",
"0.6376048",
"0.63608104",
"0.6327564",
"0.63082105",
"0.6292444",
"0.62719584",
"0.62709236",
"0.62575185",
"0.6252183",
"0.62429845",
"0.62324667",
"0.6226187",
"0.6206077",
"0.61917627",
"0.6178722",
"0.6163082",
"0.6157391",
"0.61289006"
] | 0.88943046 | 0 |
Test the AioBaseTurtle._calc_rotation function | def test_calc_rotation(self):
t = AioBaseTurtle()
t.speed(speed=2)
orient, steps, delta = t._calc_rotation(120)
self.assertEqual(steps, 21)
self.assertAlmostEqual(delta, 120.0 / 21.0)
self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))
self.assertAlmostEqual(orient[1], math.sin(math.radians(120))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_rotation_angle(self):\n\n self.test_shape.azimuth_placement_angle = [45, 135, 225, 315]\n test_volume = self.test_shape.volume()\n self.test_shape.rotation_angle = 180\n assert self.test_shape.volume() == pytest.approx(test_volume * 0.5)",
"def test_rotation(self, tol):\n theta = 0.98\n S = symplectic.rotation(theta)\n expected = np.block([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])\n np.allclose(S, expected, atol=tol, rtol=0)",
"def test_rotate(self):\n rotable = TestRotable()\n command = RotateCommand(rotable)\n collinear_to_new_direction = rotable.get_direction() + rotable.get_angular_velocity()\n\n command()\n\n ratio = norm(rotable.get_direction()) / norm(collinear_to_new_direction)\n self.assertTrue(allclose(collinear_to_new_direction * ratio, rotable.get_direction()))\n self.assertTrue(isclose(norm(rotable.get_direction()), 1))",
"def test_rotated(self):\n self._calibration_test(\"rotated\")",
"def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)",
"def test_rotation_isometry(self):\n import numpy\n\n # test for all kinds of curvatures K\n for k in (0, 1, -1, 1/11, -1/11, 11, -2):\n \n s = space(curvature=k)\n\n # use a small enough magnitude to not break math for very negative K\n magic = 0.33377777373737737777\n # 1/sqrt(2)\n s2_ref = 0.707106781186547524400844362104785\n\n o = s.make_origin(2)\n p = s.make_point((1, 0), magic)\n q = s.make_point((s2_ref, s2_ref), magic)\n\n rot = space_point_transform(\n numpy.array([[1,0,0],[0,s2_ref,-s2_ref],[0,s2_ref,s2_ref]]),\n curvature=k,\n math = common_math\n )\n\n f, g, i = map(space_point_transform, (p, q, o))\n\n def check_transform_eq(t1, t2, invert=False):\n for ref in (\n s.make_point((5/13, 12/13), magic),\n s.make_point((-3/5, 4/5), magic)\n ):\n self.assertTrue(invert ^ point_isclose(\n t1(ref),\n t2(ref),\n abs_tol = 1e-12\n ))\n\n # 1/8 turn, times 8\n check_transform_eq(rot*8, i)\n\n # rotate, shift, rotate\n check_transform_eq(g, rot + f + rot * -1)\n\n # the other way\n check_transform_eq(f, rot * -1 + g + rot)",
"def test_arbitrary_rotation(self):\n \n # This test is run a bunch of times on various intervals, ranging from 50% to 1/6\n\t\t# (16.667%).\n for i in range(2, 7):\n \n interval = 1 / i # The amount to increase each qubit's probability by, relative to the previous qubit\n step_string = \"{:.4f}\".format(100 / i) # The decimal representation of the interval, as a percent\n target_probabilities = [0] * (i + 1) # This will store the desired probabilities of each qubit\n for j in range(0, i + 1):\n target_probability = j * interval\n target_probabilities[j] = target_probability\n\n # Run the test\n self.run_test(self.arbitrary_rotation_function, f\"Rotation with steps of 1/{i} ({step_string}%)\", 2000, target_probabilities, 0.05)",
"def test_calculate_angle():\n r1 = np.array([0, 0, -1])\n r2 = np.array([0, 0, 0])\n r3 = np.array([1, 0, 0])\n\n expected_angle = 90\n calculated_angle = molecool.calculate_angle(r1, r2, r3, degrees = True)\n\n assert expected_angle == calculated_angle",
"def test_extract_rot_angle():\n v = np.zeros((4,2))\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Zero velocities not allowed.\"\n \n v[:,1] = 1.\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Failed to get both forward and backward directions.\"\n\n # Forwards-backwards motion.\n v[:,1] = 0.\n v[:2,0] = -1.1\n v[2:,0] = 1.2\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,np.pi)\n\n # Forwards-backwards motion.\n v[:,0] = 0.\n v[:2,1] = -.9\n v[2:,1] = .8\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2)\n\n # Forwards-backwards motion with noise.\n v[:2,1] += (np.random.rand(2)*2-1)/10\n v[2:,1] += (np.random.rand(2)*2-1)/10\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2,atol=.1)",
"def test_from_rotation_angle_coordinate_of_phi(rotationangle):\n\n # Get the coordinate at phi\n phi_dash = rotationangle[\"phi\"]\n c3 = rotationangle[\"cs\"].from_rotation_angle(phi_dash)\n\n # Ensure that it is at the origin\n assert c3 == pytest.approx(0.0)",
"def test_skel_rotation(self):\n cmds.file(f=1, new=1)\n cmds.mayaUSDImport(file=self.skel_file, ani=1, aef=1)\n\n values = cmds.keyframe('joint1.rx', q=1, vc=1)\n self.assertAlmostEqual(0.0, values[-1])",
"def test_calc_vector_rotation(time_location, moon_time_location, telescope_frame):\n if telescope_frame == \"itrs\":\n time, telescope_location = time_location\n else:\n time, telescope_location = moon_time_location\n\n source = SkyModel(\n name=\"Test\",\n ra=Longitude(12.0 * units.hr),\n dec=Latitude(-30.0 * units.deg),\n frame=\"icrs\",\n stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,\n spectral_type=\"flat\",\n )\n source.update_positions(time, telescope_location)\n\n coherency_rotation = np.squeeze(source._calc_coherency_rotation())\n\n assert np.isclose(np.linalg.det(coherency_rotation), 1)",
"def test_rotation_angle_warning(self):\n\n def warning_trigger():\n try:\n paramak.CenterColumnStudyReactor(\n inner_bore_radial_thickness=20,\n inboard_tf_leg_radial_thickness=50,\n center_column_shield_radial_thickness_mid=50,\n center_column_shield_radial_thickness_upper=100,\n inboard_firstwall_radial_thickness=20,\n divertor_radial_thickness=100,\n inner_plasma_gap_radial_thickness=80,\n plasma_radial_thickness=200,\n outer_plasma_gap_radial_thickness=90,\n # first number must be between plasma inner/outer radius\n plasma_high_point=(245, 240),\n plasma_gap_vertical_thickness=40,\n center_column_arc_vertical_thickness=520,\n rotation_angle=360)\n\n except BaseException:\n pass\n\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\"always\")\n warning_trigger()\n assert len(w) == 1\n assert issubclass(w[-1].category, UserWarning)\n assert \"360 degree rotation may result in a Standard_ConstructionError or AttributeError\" in str(\n w[-1].message)",
"def test_str_rotation_angle(self):\n xknx = XKNX()\n sensor = Sensor(\n xknx, \"TestSensor\", group_address_state=\"1/2/3\", value_type=\"rotation_angle\"\n )\n sensor.sensor_value.payload = DPTArray(\n (\n 0x2D,\n 0xDC,\n )\n )\n\n self.assertEqual(sensor.resolve_state(), 11740)\n self.assertEqual(sensor.unit_of_measurement(), \"°\")\n self.assertEqual(sensor.ha_device_class(), None)",
"def rotation(self):\n\t\treturn self.piv.a.rotate.v",
"def test_to_rotation(self):\r\n q = np.array([-1, 1, 3, 2])\r\n q = q / np.linalg.norm(q)\r\n R_gt = np.array([\r\n [-1/3., -14/15., -2/15.],\r\n [2/3., -1/3., 2/3.],\r\n [-2/3., 2/15., 11/15.]]).T\r\n R = to_rotation(q)\r\n\r\n zero_matrix = R - R_gt\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n for _ in range(20):\r\n q = np.random.randn(4)\r\n q /= np.linalg.norm(q)\r\n q_inv = quaternion_conjugate(q)\r\n\r\n R = to_rotation(q)\r\n R_inv = to_rotation(q_inv)\r\n\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)\r\n\r\n # orthogonal matrix\r\n zero_matrix = R @ R.T - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)",
"def testCalculateRotationDiff(self):\n # Test identity\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertEqual(result, 0.0)\n # Test arbitrary rotation\n rot1 = numpy.array(\n [[0.0, 1.0, 0.0], [-1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])\n rot2 = numpy.array(\n [[0.0, 0.0, -1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])\n transform1[0:3, 0:3] = numpy.matmul(transform1[0:3, 0:3], rot1)\n transform2[0:3, 0:3] = numpy.matmul(transform2[0:3, 0:3], rot2)\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Order shouldn't matter\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(result, 120.0 * numpy.pi / 180.0, 8)\n # Test when the angle is pi\n transform1 = numpy.eye(4)\n transform2 = numpy.eye(4)\n transform2[0, 0] = -1.0\n transform2[1, 1] = -1.0\n (_, result) = self.evaluator._calculateDifference(transform1, transform2)\n # It might wrap to -pi, so check the absolute value\n self.assertAlmostEqual(abs(result), numpy.pi, 8)\n # Test an extreme value\n transform2 = -1.0 * numpy.eye(4)\n (_, result) = self.evaluator._calculateDifference(transform2, transform1)\n self.assertAlmostEqual(abs(result), numpy.pi)",
"def get_rotation(self) -> np.array:\n axis = self.get_arms()[1]\n force = [self.d_x, self.d_y] # \"Force applied on the arm\"\n o_m = [self.target.x_obj - axis.x_obj, self.target.y_obj - axis.y_obj]\n torque = o_m[0]*force[1] - o_m[1] * force[0] # OM vectorial F\n if torque == 1: # Anti clockwise rotation\n rotation = np.array([[0, -1], [1, 0]])\n if torque == -1: # Clockwise rotation\n rotation = np.array([[0, 1], [-1, 0]])\n if torque == 0: # No rotation\n rotation = np.array([[0, 0], [0, 0]])\n return rotation",
"def rotate_turtle(angle, mv_direction):\n \n if mv_direction == 1:\n turtle.right(angle)\n else:\n turtle.left(angle)",
"def get_rotation_angle(self, image):\n \n # TODO: Make real functionality\n return 0",
"def get_rot_dtdt(self) -> WAQuaternion:\n pass",
"def test_rot(self):\n\n print(\"rot()\")\n obs = self.fixture\n\n # rotation(0) = identity\n for axis in [1, 2, 3]:\n # theta = 0.0\n rotation = obs.rot(0.0, axis)\n # find || eye - rot1 ||\n diff = np.linalg.norm(np.eye(3) - rotation)\n self.assertAlmostEqual(diff, 0.0, delta=1e-12)\n # theta = 2*pi\n rotation = obs.rot(2.0 * np.pi, axis)\n # find || eye - rot1 ||\n diff = np.linalg.norm(np.eye(3) - rotation)\n self.assertAlmostEqual(diff, 0.0, delta=1e-12)\n\n # perform many randomized tests\n num_tests = 100\n num_products = 10\n for _test_counter in range(num_tests):\n thetas = []\n axes = []\n base = np.eye(3)\n # we will multiply a series of rotations into \"base\"\n rot_all = base\n for _rot_counter in range(num_products):\n theta = np.random.uniform(2 * np.pi) # in [0,2 pi]\n axis = np.random.randint(3) + 1 # in {1,2,3}\n axes.append(axis)\n thetas.append(theta)\n rotation = obs.rot(theta, axis)\n # multiply rot1 into the cumulative rotation\n rot_all = np.dot(rot_all, rotation)\n # now, back all the rotations out\n for _rot_counter in range(num_products):\n theta = thetas.pop()\n axis = axes.pop()\n # apply the inverse rotation\n rotation = obs.rot(-theta, axis)\n rot_all = np.dot(rot_all, rotation)\n # find || base - rot1 * rot2 ||\n diff = np.linalg.norm(base - rot_all)\n self.assertAlmostEqual(diff, 0.0, delta=1e-10 * num_products)",
"def comp_rot_dir(self):\n\n MMF = self.comp_mmf_unit()\n p = self.get_pole_pair_number()\n\n # Compute rotation direction from unit mmf\n results = MMF.get_harmonics(1, \"freqs\", \"wavenumber\")\n H1 = results[MMF.symbol]\n\n return sign(H1[0])",
"def make_rotation(self, rotation):\n if rotation == \"r\":\n self.facing += 1\n else:\n self.facing -= 1\n\n if self.facing > 3:\n self.facing = self.facing - 4\n elif self.facing < 0:\n self.facing = self.facing + 4",
"def rotation(self, *args, **kwargs) -> Any:\n pass",
"def _rotate(self, tetrino):\n tetrino.rotate()",
"def test_default_parameters(self):\n\n assert self.test_shape.rotation_angle == 360",
"def test_rotate_down(self):\n # Testing 'down' rotation clockwise\n side = 'D'\n rotation = list(self.cube.rotate_cube(side))\n result = [np.array([['g', 'g'], ['r', 'r']], dtype='<U1'),\n np.array([['y', 'y'], ['y', 'y']], dtype='<U1'),\n np.array([['o', 'o'], ['g', 'g']], dtype='<U1'),\n np.array([['w', 'w'], ['w', 'w']], dtype='<U1'),\n np.array([['b', 'b'], ['o', 'o']], dtype='<U1'),\n np.array([['r', 'r'], ['b', 'b']], dtype='<U1')]\n\n np.testing.assert_array_equal(rotation, result)",
"def rotation_angle(self):\n return self.container['rotation_angle']",
"def test_rotate_without_moving(controller):\n distance = math.pi / 2 * (DISTANCE_BETWEEN_WHEELS / 2)\n revolution = distance / (2 * math.pi * WHEEL_RADIUS)\n ticks = revolution * TICK_PER_REVOLUTION\n pos, angle = controller.odometry(\n round(10 - ticks),\n round(10 + ticks),\n Vector2(0, 0),\n 0,\n )\n\n # Rotate 90 degrees without moving.\n assert pos == Vector2(0, 0)\n assert round(math.pi / 2 / angle, 1) == 1\n\n # Rotate back to 0 degrees without moving.\n pos, angle = controller.odometry(10, 10, Vector2(0, 0), 0)\n assert pos == Vector2(0, 0)\n assert round(-math.pi / 2 / angle, 1) == 1"
] | [
"0.72649866",
"0.71997005",
"0.7070056",
"0.6858857",
"0.67564845",
"0.6559771",
"0.65583205",
"0.6543984",
"0.65229213",
"0.6519182",
"0.64923644",
"0.64011544",
"0.63578784",
"0.632081",
"0.62979436",
"0.62914723",
"0.62909883",
"0.62908113",
"0.6243588",
"0.62338036",
"0.6228936",
"0.6217549",
"0.6204204",
"0.6168009",
"0.61678576",
"0.6160663",
"0.6157259",
"0.61556774",
"0.6139853",
"0.61366385"
] | 0.91761756 | 0 |
Test the AioBaseTurtle._calc_circle function | def test_calc_circle(self):
t = AioBaseTurtle()
steps, step_len, rot_step = t._calc_circle(100, extent=180)
self.assertEqual(steps, 14)
self.assertAlmostEqual(rot_step, 180.0 / 14.0)
self.assertAlmostEqual(step_len, 22.3928952207) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def GetCircle(circle):\r\n pass",
"def test_get_radius():\n center = Coordinates(7, 3)\n radius = 12\n\n returned_rad = get_radius(center, radius, 30)\n\n assert returned_rad == radius\n assert returned_rad != center.get_x()\n assert returned_rad != center.get_y()",
"def circle(radius, extent=360):\n turtleTmp.circle(radius, extent)",
"def test_circumference():\n assert func_difficult.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func_difficult.circumference_circle(0) == 0, \"is 0\"\n assert func_difficult.circumference_circle(10) == 2 * np.pi * 10",
"def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)",
"def test_circumference():\n assert func1.circumference_circle(1) == 2 * np.pi, \"returns pi *2\"\n assert func1.circumference_circle(0) == 0, \"is 0\"\n assert func1.circumference_circle(10) == 2 * np.pi * 10",
"def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )",
"def area_of_circle(radius = radious):\n area = radius * radious * 3.142\n print(\"Calculating area...\")\n time.sleep(2)\n return area",
"def test_generate_circle(self):\n\n # Generate a circle around Sydney airport with radius 3km\n radius = 3000\n C = self.Syd.generate_circle(radius)\n\n # Check distance around the circle\n # Note that not every point will be exactly 3000m\n # because the circle in defined in geographic coordinates\n for c in C:\n p = Point(c[1], c[0])\n d = self.Syd.distance_to(p)\n msg = ('Radius %f not with in expected tolerance. Expected %d'\n % (d, radius))\n assert numpy.allclose(d, radius, rtol=2.0e-1), msg\n\n # Store and view\n #from safe.storage.vector import Vector\n #Vector(geometry=[C],\n # geometry_type='polygon').write_to_file('circle.shp')\n #Vector(geometry=C,\n # geometry_type='point').write_to_file('circle_as_points.shp')\n #Vector(geometry=[[self.Syd.longitude, self.Syd.latitude]],\n # geometry_type='point',\n # data=None).write_to_file('center.shp')",
"def area_of_circle(radius):\n return radius",
"def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3",
"def circleCirc(radius):\n radius = float(radius)\n return 2*math.pi*radius",
"def get_radius(self):",
"def circle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)",
"def circle(center, radius, *args, **kwargs):\n return patch.Circle(center, radius, *args, **kwargs)",
"def objects_radius(self, centre, radius):",
"def value_circle(self):\r\n return self.circle",
"def circle_circumference(a):\n return (2*a*math.pi)",
"def make_circle(self):\n A = 2*np.random.rand(self.m, self.n)-1\n b = np.sign(np.sum(A**2, 1) - self.radius)\n return A, b",
"def circle(t, r):\n circumference = math.pi * 2 * r\n n = 60\n length = circumference / n\n polygon(t, length, n)",
"def fit_circle_func():\n pass",
"def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)",
"def draw_circle(self, center, radius, line_width, line_color, fill_color=\"\"):\n line_color, fill_color = check_color(line_color), check_color(fill_color)\n SToval.oval(self.canvas, center, radius, line_width, line_color, fill_color)",
"def test_circle_draw():\n with TestingCanvas():\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle1.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n color=(1, 0, 0, 1),\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle2.png')\n\n gloo.clear()\n ellipse = visuals.Ellipse(pos=(0.5, 0.3, 0), radius=0.4,\n border_color=(0, 1, 1, 1))\n ellipse.draw()\n assert_image_equal(\"screenshot\", 'visuals/circle3.png')",
"def circle_radius(a, b, c):\n # the sides cannot be negative\n if a < 0 or b < 0 or c < 0:\n return None\n else:\n # semi-perimeter of the circle\n p = (a + b + c) / 2\n\n # area of the traingle\n area = sqrt(p * (p - a) *\n (p - b) * (p - c))\n # Radius of the incircle\n radius = area / p\n # Return the radius\n return radius",
"def test_update_radius():\n center = Coordinates(1, 1)\n rad1 = 20.3\n speed = 30\n\n i = Intersection(center, rad1, speed)\n\n assert i.get_radius() == 20.3\n\n i.update_radius(56.5)\n\n assert i.get_radius() == 56.5",
"def iscircle(a):\n if isarc(a):\n start=a[1][1] \n end=a[1][2]\n ## these are special, integer values that flag a true full\n ## circle.\n if start==0 and end==360:\n return True\n else:\n return False",
"def remote_concentric_circles(circle_turtle,dis_range,radius):\r\n for i in range(dis_range):\r\n color = random.choice(dark_colors)\r\n circle_turtle.color(color)\r\n circle_turtle.circle(radius*i)\r\n circle_turtle.up()\r\n circle_turtle.sety((radius*i)*(-1))\r\n circle_turtle.down()\r\n\r\n circle_turtle.up()\r\n circle_turtle.goto(0,0)\r\n circle_turtle.down()"
] | [
"0.734142",
"0.734142",
"0.72636837",
"0.703413",
"0.6813236",
"0.6676832",
"0.6654256",
"0.66349846",
"0.6573545",
"0.65500736",
"0.6521824",
"0.6442423",
"0.64218247",
"0.6412573",
"0.63668287",
"0.63393223",
"0.6279476",
"0.6262953",
"0.6237789",
"0.6233661",
"0.6233039",
"0.6232965",
"0.6225517",
"0.6213921",
"0.62127304",
"0.61972475",
"0.6193501",
"0.61925143",
"0.6183752",
"0.6175291"
] | 0.8547868 | 0 |
Test the AioBaseTurtle._move_step function | def test_move_step(self):
t = AioBaseTurtle()
t._move_step(Vec2D(-100, 0), 20, Vec2D(10,5))
self.assertAlmostEqual(t._position[0], 100)
self.assertAlmostEqual(t._position[1], 100)
t.screen._drawline.assert_called_once_with(
t.currentLineItem,
((-100.0, 0.0), (100.0, 100.0)), # called with mutable _position
"black",
1,
False
)
self.mock_update.assert_called_once_with() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def step(self, move):",
"def test_calc_move(self):\n t = AioBaseTurtle()\n t.speed(speed=5)\n steps, delta = t._calc_move(Vec2D(0, 100))\n self.assertEqual(steps, 20)\n self.assertAlmostEqual(delta[0], 0.0)\n self.assertAlmostEqual(delta[1], 5.0)",
"def move(self, direction, step):\n for i in range(1, step + 1):\n y, x = self.robot_position\n if direction == \"N\" and y > 0:\n if self.carte[y - 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y - 1, x)\n elif direction == \"S\" and y <= self.height:\n if self.carte[y + 1][x] in [\" \", \".\", \"U\"]:\n self.robot_position = (y + 1, x)\n elif direction == \"E\" and x <= self.width+1:\n if self.carte[y][x + 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x + 1)\n elif direction == \"O\" and x > 0:\n if self.carte[y][x - 1] in [\" \", \".\", \"U\"]:\n self.robot_position = (y, x - 1)\n\n if self.robot_position == self.out_position:\n print(\"Bravo vous avez fini\")\n return True\n\n return False",
"def move(self, step):\n\n status = self.read()\n Logger.getLogger().debug(\"Status in move method: %s\", status)\n # while the motors are moving we don't want to start another movement\n if status > CurtainsStatus.OPEN or self.motor.value:\n return\n\n self.target = step\n\n # deciding the movement direction\n if self.steps() < self.target:\n self.__open__()\n elif self.steps() > self.target:\n self.__close__()",
"def moveStep(self):\n\t\tif self.pos[0] <= self.boundsX[0] or \\\n\t\t(self.pos[0]+ 2*(self.radius)) >= self.boundsX[1]:\n\t\t\tself.dir[0] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed",
"def moveStep(self):\n\t\tif self.pos[0] < self.boundsX[0] or \\\n\t\t\tself.pos[0] > (self.boundsX[1] - self.width):\n\t\t\t\tself.dir[0] *= -1\n\t\tif self.pos[1] < self.boundsY[0] or \\\n\t\t self.pos[1] > (self.boundsY[1] - self.height):\n\t\t\t\tself.dir[1] *= -1\n\t\t\t\n\t\tself.pos[0] += self.dir[0]*self.speed\n\t\tself.pos[1] += self.dir[1]*self.speed",
"def _step(self) -> None:",
"def move(self, head, steps):\n self.turn(head)\n if self.direction == 0:\n self.x += int(steps)\n if self.direction == 1:\n self.y += int(steps)\n if self.direction == 2:\n self.x -= int(steps)\n if self.direction == 3:\n self.y -= int(steps)",
"def move_step(self, direction):\n x = self.objects[0].x\n y = self.objects[0].y\n if direction == 0 and y >= 1:\n self.objects[0].y -= 1\n elif direction == 1 and y <= self.size_y - 2:\n self.objects[0].y += 1\n elif direction == 2 and x >= 1:\n self.objects[0].x -= 1\n elif direction == 3 and x <= self.size_x - 2:\n self.objects[0].x += 1",
"def move():\n Robot.move()",
"def move(self, timestep):\n if self.trajectoryStep >= len(self.trajectory):\n # return trajectory completed\n return False\n\n target2DPosition = self.trajectory[self.trajectoryStep]\n vector = [-target2DPosition[0] - self.translation[0],\n -target2DPosition[1] - self.translation[1],\n 0.0]\n distance = math.sqrt(vector[0] * vector[0] + vector[1] * vector[1] +\n vector[2] * vector[2])\n maxStep = MovingTarget.SPEED * timestep\n\n if distance < maxStep:\n self.trajectoryStep += 1\n self.translation = [a + b for a, b in zip(self.translation, vector)]\n segmentChanged = True\n else:\n if math.isinf(self.rotationStep):\n self.rotationStepsCount = 10\n newAngle = math.acos(dotProduct([1.0, 0.0, 0.0], vector))\n if vector[1] < 0.01:\n newAngle = -newAngle\n diff = self.rotationAngle - newAngle\n while diff > math.pi:\n diff -= 2 * math.pi\n while diff < -math.pi:\n diff += 2 * math.pi\n self.rotationStep = -diff / self.rotationStepsCount\n\n factor = maxStep / distance\n self.translation[0] += vector[0] * factor\n self.translation[1] += vector[1] * factor\n segmentChanged = False\n\n self.translationField.setSFVec3f(self.translation)\n\n if self.rotationStepsCount > 0:\n if segmentChanged:\n self.rotationAngle += self.rotationStep * \\\n self.rotationStepsCount\n self.rotationStepsCount = 0\n else:\n self.rotationAngle += self.rotationStep\n self.rotationStepsCount -= 1\n self.rotationField.setSFRotation([0.0, 0.0, 1.0,\n self.rotationAngle])\n\n if segmentChanged:\n self.rotationStep = float('Inf')\n return True",
"def move(self, *step):\n self.x += step[0]\n self.y += step[1]",
"def move(self, t, s):\n raise NotImplementedError",
"def do_step(self) -> None:",
"def test_move_default_dropped_steps(self):\n player = ss.LazyPlayer()\n random.seed(2)\n player.move()\n random.seed(5)\n player.move()\n assert player.position == 44",
"def move_time(self, step_before, step_after):\n raise NotImplementedError",
"def test_move_default_extra_steps(self):\n player = ss.ResilientPlayer()\n random.seed(2)\n player.move()\n random.seed(1)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 32",
"def test_set_position_after_travel(self):\n travelcalculator = TravelCalculator(25, 50)\n travelcalculator.start_travel(30)\n travelcalculator.set_position(80)\n assert travelcalculator.position_reached()\n assert travelcalculator.current_position() == 80",
"def move(self,dt):\n raise NotImplementedError(\"Robot.move\")",
"def move(self, move):\n raise NotImplementedError()",
"def move_step(self, move):\n # Check that the move is valid\n steps = self.mgr.obj.steps\n if len(steps) == 0:\n return\n idx = self.stepsListWidget.currentRow()\n idx_max = len(steps) - 1\n if (idx+move < 0) or (idx+move > idx_max):\n return\n \n # Insert the step at its new location, then delete it at the old location\n steps.insert(idx+move+(move>0), steps[idx])\n del steps[idx if move>0 else idx+1]\n \n self.load_steps()\n self.stepsListWidget.setCurrentRow(idx+move)\n self.mgr.changed = True",
"def step(self, action):\n x, y = self._move(action, *self._currentPos)\n\n if chr(self._grid[x, y]) == CASE_TYPES.Wall:\n # error - previous state was already a wall\n self._done = True\n self._trajectory.append(self._currentPos)\n return self._currentPos, -1, self._done, {}\n\n reward = {\n CASE_TYPES.Water: self.waterReward,\n CASE_TYPES.Sand: self.sandReward,\n CASE_TYPES.Open: self.stepReward,\n CASE_TYPES.Termination: self.successReward,\n CASE_TYPES.Trap: (\n -(self.maxSteps - len(self._trajectory)) + self.failureReward +\n self.trapReward)\n }[chr(self._grid[x, y])]\n\n # termination state\n if chr(self._grid[x, y]) in [CASE_TYPES.Termination, CASE_TYPES.Trap]:\n self._done = True\n\n self._currentPos = (x, y)\n\n self._trajectory.append(self._currentPos)\n self._nbSteps += 1\n\n if self._nbSteps >= self.maxSteps and not self._done:\n reward += self.failureReward\n\n return self._currentPos, reward, self._done, {}",
"def test_move_dropped_steps_greater_than_move(self):\n player = ss.LazyPlayer(dropped_steps=3)\n random.seed(2)\n player.move()\n random.seed(2)\n player.move()\n assert player.position == 40",
"def _step(self):\n pass",
"def step(self, action):\n # print(\"############################\")\n # print(\"action: {}\".format(action))\n\n self.movement_complete.data = False\n\n # 1) Read last joint positions by getting the observation before acting\n old_observation = self.get_obs()\n\n # 2) Get the new joint positions according to chosen action (actions here are the joint increments)\n if self._joint_increment is None:\n next_action_position = action\n else:\n next_action_position = self.get_action_to_position(action, old_observation[1:7])\n\n # 3) Move to position and wait for moveit to complete the execution\n self.publisher_to_moveit_object.pub_joints_to_moveit(next_action_position)\n # rospy.wait_for_message(\"/pickbot/movement_complete\", Bool)\n while not self.movement_complete.data:\n pass\n\n start_ros_time = rospy.Time.now()\n while True:\n # Check collision:\n # invalid_collision = self.get_collisions()\n # if invalid_collision:\n # print(\">>>>>>>>>> Collision: RESET <<<<<<<<<<<<<<<\")\n # observation = self.get_obs()\n # reward = UMath.compute_reward(observation, -200, True)\n # observation = self.get_obs()\n # print(\"Test Joint: {}\".format(np.around(observation[1:7], decimals=3)))\n # return U.get_state(observation), reward, True, {}\n\n elapsed_time = rospy.Time.now() - start_ros_time\n if np.isclose(next_action_position, self.joints_state.position, rtol=0.0, atol=0.01).all():\n break\n elif elapsed_time > rospy.Duration(2): # time out\n break\n # time.sleep(s\n\n \"\"\"\n #execute action as long as the current position is close to the target position and there is no invalid collision and time spend in the while loop is below 1.2 seconds to avoid beeing stuck touching the object and not beeing able to go to the desired position \n time1=time.time()\n while np.linalg.norm(np.asarray(self.joints_state.position)-np.asarray(next_action_position))>0.1 and self.get_collisions()==False and time.time()-time1<0.1: \n rospy.loginfo(\"Not yet reached target position and no collision\")\n \"\"\"\n # 4) Get new observation and update min_distance after performing the action\n new_observation = self.get_obs()\n if new_observation[0] < self.min_distace:\n self.min_distace = new_observation[0]\n # print(\"observ: {}\".format( np.around(new_observation[1:7], decimals=3)))\n\n # 5) Convert Observations into state\n state = U.get_state(new_observation)\n\n # 6) Check if its done, calculate done_reward\n done, done_reward, invalid_contact = self.is_done(new_observation)\n\n # 7) Calculate reward based on Observatin and done_reward and update the accumulated Episode Reward\n reward = UMath.compute_reward(new_observation, done_reward, invalid_contact)\n\n ### TEST ###\n if done:\n joint_pos = self.joints_state.position\n print(\"Joint in step (done): {}\".format(np.around(joint_pos, decimals=3)))\n ### END of TEST ###\n\n self.accumulated_episode_reward += reward\n\n self.episode_steps += 1\n\n return state, reward, done, {}",
"def move(self):\n pass",
"def step(self, state):",
"def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)",
"def movement(self):",
"def move_turtle(self):\n self.forward(self.move_speed)"
] | [
"0.7820133",
"0.7763472",
"0.6788755",
"0.6762144",
"0.6700953",
"0.66414285",
"0.6528104",
"0.651918",
"0.6467042",
"0.644896",
"0.6419937",
"0.639203",
"0.637763",
"0.63649815",
"0.6354341",
"0.63443154",
"0.63338375",
"0.63002443",
"0.6289629",
"0.62895745",
"0.62881094",
"0.6285331",
"0.62846184",
"0.62506056",
"0.6224792",
"0.6191413",
"0.6185826",
"0.6178296",
"0.61711407",
"0.61653686"
] | 0.8622489 | 0 |
Temporarily overwrite the settings with test settings. This allows test datasets to be used for testing. | def generate_test_settings(tmpdir, dataset):
    # When `tmpdir` is a py.path.local object, convert it to a string
if isinstance(tmpdir, py._path.local.LocalPath):
tmpdir = str(tmpdir)
test_settings = {
'datasets': {
'mnist': {
'train': {
'images': "file://" + tmpdir + "/" + dataset + "/server/train-images-idx3-ubyte.gz",
'labels': "file://" + tmpdir + "/" + dataset + "/server/train-labels-idx1-ubyte.gz"
},
'test': {
'images': "file://" + tmpdir + "/" + dataset + "/server/t10k-images-idx3-ubyte.gz",
'labels': "file://" + tmpdir + "/" + dataset + "/server/t10k-labels-idx1-ubyte.gz"
},
},
},
'data-dir': tmpdir + "/" + dataset + "/data"
}
overwrite_settings(test_settings) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def force_test_setting(dm, tsm, output_path):\n if dm is not None:\n data_json_path = os.path.join(output_path, 'cur_data_setting.json')\n dm.data_par['datapro']['dataset']['prepare_data'] = False\n dm.data_par['datapro']['reg']['max_num_for_loading'] = [1, 1, -1, 1]\n dm.save(data_json_path)\n else:\n tsm.task_par['dataset']['max_num_for_loading'] = [1, 1, -1, 1]\n tsm.task_par['tsk_set']['train'] = False\n tsm.task_par['tsk_set']['continue_train'] = False\n tsk_json_path = os.path.join(output_path, 'cur_task_setting.json')\n tsm.save(tsk_json_path)",
"def teardown_function():\n\n # Force module reload as the default test settings have been restored\n importlib.reload(defaults)",
"def __perapre_test_setting(package_settings: dict) -> dict:\n\n __package_setting = copy.deepcopy(package_settings)\n\n __package_setting['slient'] = False\n\n if __package_setting.get('weights') is not None:\n __package_setting['weights'] = [1, 1, 1, 1, 1]\n\n return __package_setting",
"def test_settings_restored(self) -> None:\n from django.conf import settings\n\n assert TestLiveServer._test_settings_before_run is True # type: ignore[attr-defined]\n assert (\n f\"{settings.__class__.__module__}.{settings.__class__.__name__}\"\n == \"django.conf.Settings\"\n )\n assert settings.ALLOWED_HOSTS == [\"testserver\"]",
"def get_test_settings():\n from youtube_podcast_api.config import Settings\n settings = Settings()\n settings.db_path = \"./sql_test.db\"\n return settings",
"def setUp(self):\n self.settings = MockSettings()\n django_yamlconf.load(project=\"testing\", settings=self.settings)",
"def setUp(self):\n self.dataset = get_test_dataset()",
"def test_default_options(self):\r\n\r\n settings.ASSETS_URL_EXPIRE = True\r\n assert get_env().config['url_expire'] == settings.ASSETS_URL_EXPIRE\r\n\r\n settings.ASSETS_ROOT = 'FOO_ASSETS'\r\n settings.STATIC_ROOT = 'FOO_STATIC'\r\n settings.MEDIA_ROOT = 'FOO_MEDIA'\r\n # Pointing to ASSETS_ROOT\r\n assert get_env().directory.endswith('FOO_ASSETS')\r\n get_env().directory = 'BAR'\r\n assert settings.ASSETS_ROOT == 'BAR'\r\n # Pointing to STATIC_ROOT\r\n delsetting('ASSETS_ROOT')\r\n assert get_env().directory.endswith('FOO_STATIC')\r\n get_env().directory = 'BAR'\r\n assert settings.STATIC_ROOT == 'BAR'\r\n # Pointing to MEDIA_ROOT; Note we only\r\n # set STATIC_ROOT to None rather than deleting\r\n # it, a scenario that may occur in the wild.\r\n settings.STATIC_ROOT = None\r\n assert get_env().directory.endswith('FOO_MEDIA')\r\n get_env().directory = 'BAR'\r\n assert settings.MEDIA_ROOT == 'BAR'",
"def configure_test(self, test, config_json):\n pass",
"def test_reset_settings(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(4)\n self.feature_test.reset_settings()\n\n generated = Feature(\"testing\")\n self.assertEqual(generated.percentage, 0)\n self.assertFalse(3 in generated.whitelist)\n self.assertFalse(4 in generated.blacklist)",
"def setUp(self):\n\n self.test_data_path = 'testing/test_data/'",
"def custom_settings(tmpdir_factory):\n overrides = override_settings(\n MEDIA_ROOT=str(tmpdir_factory.mktemp('test_media')))\n overrides.enable()",
"def setupTests(self, paths = [], tests = {}):\n # Used for settings only\n self.view = self.build.window.active_view()\n self._settings = {}\n for key in buildSettings:\n self._settings[key] = self._coalesceOption(key)\n self.runnerSetup(paths = paths, tests = tests)",
"def setUpTestData(cls):\n # Set up non-modified objects used by all test methods\n Prohibited.objects.create(credential_type=2, credential='google.com')\n Prohibited.objects.create(credential_type=1, credential='127.0.0.1')",
"def _load_test_data(self):\n self._save_test_data()",
"def test_set_testing(self):\n old_value = Config.testing\n Config.set_testing(True)\n\n self.assertNotEqual(old_value, Config.testing)",
"def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return",
"def setUpConfig(self):\n pass",
"def setUp(self):\n self.dataset = self.dataset_cls()",
"def setUp(self):\n self.directory = tempfile.TemporaryDirectory()\n self.dataset = self.dataset_cls(cache_root=self.directory.name)",
"def tearDown(self):\n # set the config module level variables back to None\n config.config._conf_parser = None\n config.config._user_config_file = None",
"def setup_settings():\n # pylint: disable=import-outside-toplevel\n from django.conf import settings\n import tiny_erp.settings as defaults\n\n for name in dir(defaults):\n if name.isupper() and not hasattr(settings, name):\n setattr(settings, name, getattr(defaults, name))",
"def setUpClass(cls):\n super(Module05Tests, cls).setUpClass()\n cls.datasets = {\n 0: DATASETS_ROOT + 'diffusion_synthetic_normal_L8_r2_slices_41_50_gr15_b1200',\n 1: DATASETS_ROOT + 'filtered',\n 2: DATASETS_ROOT + 'noise'\n }\n cls.data = smns.load_object(file_path=cls.datasets[2])",
"def create_default_settings():\n from flaskbb.fixtures.settings import fixture\n create_settings_from_fixture(fixture)",
"def app(request):\n settings_override = {\n 'TESTING': True,\n }\n yield settings_override",
"def initialize_options(self):\n self.all = False\n self.coverage = False\n super(test, self).initialize_options()",
"def test_defaults(self) -> None:\n\n scratch_view: sublime.View = sublime.active_window().new_file()\n tabs: List[Tab] = [Tab(scratch_view)]\n\n data_set: Tuple[Tuple[TabSetting, bool, str], ...] = (\n (\n ShowCaptionsTabSetting,\n DEFAULT_SETINGS[\"show_captions\"],\n \"show_captions\"\n ),\n (\n IncludePathTabSetting,\n DEFAULT_SETINGS[\"include_path\"],\n \"include_path\"\n ),\n (\n ShowGroupCaptionTabSetting,\n DEFAULT_SETINGS[\"show_group_caption\"],\n \"show_group_caption\"\n )\n )\n\n for (cls, enabled, caption) in data_set:\n with self.subTest(cls=cls, enabled=enabled, caption=caption):\n inst = cls(\n self.settings,\n sublime.active_window()\n ) # type: ignore\n self.assertEqual(enabled, inst.is_enabled())\n self.assertListEqual(tabs, inst.apply(tabs))",
"def afterSetUp(self):\n self.load_config = {}\n self.load_config['monitor_interval'] = 1\n self.load_config['limit_number_request'] = 100\n self.load_config['limit_memory_used'] = 500",
"def setUp(self):\n\n fq_dataset_name = self.fq_table_names[0].split('.')\n self.fq_dataset_name = '.'.join(fq_dataset_name[:-1])\n\n fq_sandbox_name = self.fq_sandbox_table_names[0].split('.')\n self.fq_sandbox_name = '.'.join(fq_sandbox_name[:-1])\n\n super().setUp()",
"def turn_test_mode_off_by_default(test_mode_off):"
] | [
"0.69389164",
"0.6621101",
"0.6452525",
"0.64237857",
"0.64121604",
"0.6407746",
"0.6387266",
"0.6371625",
"0.6295202",
"0.6287137",
"0.6241228",
"0.6236511",
"0.62239265",
"0.6192211",
"0.6178279",
"0.61352205",
"0.6134868",
"0.6040336",
"0.6021929",
"0.6021816",
"0.6011449",
"0.6010427",
"0.6010105",
"0.6002002",
"0.5988062",
"0.5981944",
"0.59710366",
"0.5961021",
"0.59479684",
"0.5930659"
] | 0.6982843 | 0 |
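The `generate_test_settings` record above builds file:// URLs under a temporary directory and hands them to `overwrite_settings`. A minimal usage sketch follows; it is not part of the dataset itself, it assumes `generate_test_settings` and `get_setting` are importable from the project these records come from, and the dataset name "mnist-mini" plus the commented module path are hypothetical.

import tempfile

# Assumed import; the real module path is not shown in these records.
# from collagen.settings import generate_test_settings, get_setting

tmpdir = tempfile.mkdtemp()                   # a plain string path is accepted
generate_test_settings(tmpdir, "mnist-mini")  # overwrites the active settings

# After the call, the dataset URLs resolve inside tmpdir:
print(get_setting("datasets.mnist.train.images"))
# -> file://<tmpdir>/mnist-mini/server/train-images-idx3-ubyte.gz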
Generate archive files for the given test dataset in tmpdir. | def generate_test_dataset_archive(filepath, dataset):
# 'file:///some/path' to '/some/path'
if filepath[:7] == 'file://':
filepath = filepath[7:]
# Check if the dataset exists.
    # If it does not, generate it.
if not os.path.isfile(filepath):
print("Generating", filepath)
data = get_test_dataset(dataset)
ensure_dir(os.path.dirname(filepath))
idxgz.save(filepath, data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' + dataset_type\n generate_test_dataset_archive(filepath, test_dataset)",
"def test_generate_test_environment(dataset):\n\n print(\"## =========================================================\")\n print(\"## Dataset:\", dataset)\n print(\"## ---------------------------------------------------------\")\n print(\"\")\n\n tmpdir = \"/tmp/collagen\"\n\n generate_test_environment(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n \n # 'file:///some/path' to '/some/path'\n if filepath[:7] == 'file://':\n filepath = filepath[7:]\n\n # Unpack\n print(\"\")\n print(\"{}: {}\".format(mnist_dataset, filepath))\n print(\"\")\n data = idxgz.load(filepath)\n print(\"data:\", data)\n print(\"type:\", type(data))\n print(\"dtype:\", data.dtype)\n print(\"shape:\", data.shape)\n\n print(\"\")",
"def test_dir(tmpdir):\n directory = tmpdir.mkdir('test_dir')\n for i in range(5):\n file_path = directory / 'test_{}.txt'.format(i)\n file_path.write_binary(b\"This is some test data!\")\n return directory",
"def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()",
"def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)",
"def test_archive_run(self):\n pass",
"def archive_test_logs(days, archive_path, all_logs):\n for day in days.keys():\n daydir = datetime.strptime(day, \"%Y%m%d\").strftime(\"%m-%d-%Y\")\n for scenario in days[day].keys():\n # temporary log directories are stored by scenario + date\n datename = scenario + \"-\" + datetime.strptime(day, \"%Y%m%d\").strftime(\"%Y-%m-%d\")\n if datename not in all_logs:\n raise RuntimeError(f\"Missing all_log entry for {datename}\")\n\n if not os.path.exists(all_logs[datename].name):\n raise RuntimeError(f\"Missing log directory for {datename}\")\n\n tmpdir = all_logs[datename].name\n failed = days[day][scenario][\"failed-tests\"]\n flakes = days[day][scenario][\"flaky-tests\"]\n\n scenario_archive = os.path.join(archive_path, daydir, scenario)\n os.makedirs(os.path.join(scenario_archive, \"failed\"))\n os.makedirs(os.path.join(scenario_archive, \"flakes\"))\n # data is organized by test names as keys with lists of tests\n for name in failed:\n i = 1\n for t in sorted(failed[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"failed\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1\n\n for name in flakes:\n i = 1\n for t in sorted(flakes[name], key=lambda x: x[\"start_time\"]):\n try:\n logdir = kstest_logdir(tmpdir, t)\n if not logdir or not os.path.exists(logdir):\n raise RuntimeError(f\"Missing logdir - {logdir}\")\n except RuntimeError:\n continue\n dst = os.path.join(scenario_archive, \"flakes\", name, str(i))\n shutil.copytree(logdir, dst)\n i += 1",
"def _archive(self, name, contents, isolate_content):\n # Shared code for all test_isolated_* test cases.\n root = os.path.join(self.tmpdir, name)\n # Refuse reusing the same task name twice, it makes the whole test suite\n # more manageable.\n self.assertFalse(os.path.isdir(root), root)\n os.mkdir(root)\n isolate_path = os.path.join(root, 'i.isolate')\n with open(isolate_path, 'wb') as f:\n f.write(isolate_content)\n for relpath, content in contents.items():\n p = os.path.join(root, relpath)\n d = os.path.dirname(p)\n if not os.path.isdir(d):\n os.makedirs(d)\n with open(p, 'wb') as f:\n f.write(content)\n return self.client.isolate(isolate_path)",
"def zip_data_file(task_id, task_name, data_path):\n zip_file_dir = os.path.join(FILE_PATH, task_id + \".zip\")\n file = zipfile.ZipFile(zip_file_dir, \"w\", zipfile.ZIP_DEFLATED)\n sample_path = os.path.join(data_path, \"datasets\", str(task_id) + \"_\" + task_name + \".csv\")\n true_dag_path = os.path.join(data_path, \"true\", str(task_id) + \"_\" + task_name + \".npz\")\n file.write(sample_path)\n file.write(true_dag_path)\n file.close()\n return zip_file_dir",
"def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response",
"def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()",
"def fixture_out_dir(tmpdir_factory) -> Path:\n my_tmpdir = Path(tmpdir_factory.mktemp(\"out\"))\n yield my_tmpdir\n shutil.rmtree(str(my_tmpdir))",
"def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()",
"def populated_archivist_dataset(archivist_dataset, tmp_path_factory):\n wpath = tmp_path_factory.mktemp(\"archivistds\")\n\n ads = archivist_dataset\n\n dscontent = (\n ('azip/file1.txt', 'zipfile1'),\n ('azip/file2.csv', 'zipfile2_muchcontent'),\n ('atar/file1.txt', 'tarfile1'),\n ('atar/file2.csv', 'tarfile2_muchcontent'),\n )\n srcds = Dataset(wpath / 'srcds').create(**nonoise)\n for fpath, fcontent in dscontent:\n fpath = srcds.pathobj / (PurePosixPath(fpath))\n fpath.parent.mkdir(parents=True, exist_ok=True)\n fpath.write_text(fcontent)\n srcds.save(**nonoise)\n\n archive_root = wpath / 'myarchive'\n #archivetype = 'zip'\n\n akeys = {}\n\n # no ZIP just yet\n # for archivetype, ext in (('zip', ''), ('tar', '.gz')):\n for archivetype, ext in (('tar', '.gz'), ):\n archive_path = Path(f\"{archive_root}.{archivetype}{ext}\")\n\n archive_path_inds = ads.pathobj / '.archives' / archive_path.name\n # create an archive, the easy way, by simply exporting the\n # entire dataset worktree\n srcds.export_archive(archive_root, archivetype=archivetype,\n **nonoise)\n assert archive_path.exists()\n\n # add the archive (in a hidden dir) to be able to reference\n # it via a key\n aurl = archive_path.as_uri()\n ads.repo.call_annex([\n 'addurl', '--file', str(archive_path_inds), aurl])\n ads.save(**nonoise)\n # get the key of the archive\n akeys[archivetype] = ads.status(\n archive_path_inds, annex='basic', return_type='item-or-list',\n **nonoise)['key']\n return ads, akeys, archive_root, dscontent",
"def sample_input_dir():\n tmpdir = tempfile.mkdtemp()\n input_zip = os.path.join(ASSETS_DIR, 'input_dir.zip')\n with zipfile.ZipFile(input_zip, \"r\") as zip_ref:\n zip_ref.extractall(tmpdir)\n yield tmpdir\n shutil.rmtree(tmpdir)",
"def archive_experiment(experiment_dir: str,\n dst_dir: str,\n save_extensions: Union[str, Sequence[str]]='py',\n exclude_dirs: Union[str, Sequence[str]]='output',\n archive_format: str='zip',\n base_name: Optional[str]=None):\n # Format save_extensions for consistency\n # Make into a sequence\n if isinstance(save_extensions, str):\n save_extensions = [save_extensions]\n # Drop any .'s\n save_extensions = [s.strip('.') for s in save_extensions]\n # Format exclude_dirs for consistency\n if isinstance(exclude_dirs, str):\n exclude_dirs = [exclude_dirs]\n # Get default base name\n if base_name is None:\n experiment_path = os.path.abspath(experiment_dir)\n base_name = [p for p in experiment_path.split('/') if p][-1]\n\n # Full name of the archive name uses a time stamp\n timestamp = time.strftime('%b%d%Y_%H%M%S')\n archive_name = f'{base_name}_{timestamp}'\n\n # Use a temporary folder to create the archive\n tmp_folder = f'/tmp/{str(uuid.uuid4())}'\n if os.path.exists(tmp_folder):\n shutil.rmtree(tmp_folder)\n os.makedirs(tmp_folder)\n tmp_experiment = os.path.join(tmp_folder, archive_name)\n os.makedirs(tmp_experiment)\n\n # Recurse through the experiment directory and non-'output' subdirectories,\n # saving files to the temporary folder\n dirs_to_check = [experiment_dir]\n while len(dirs_to_check) > 0:\n # A directory to check (DTC), relative to the experiment_dir\n dtc = dirs_to_check.pop(0)\n # Full path to the DTC\n full_dtc = dtc if dtc == experiment_dir \\\n else os.path.join(experiment_dir, dtc)\n # List of all files and folders in the DTC\n dlist = os.listdir(full_dtc)\n # List of all files in the DTC\n files = [d for d in dlist\n if os.path.isfile(os.path.join(full_dtc, d))]\n # Check each file to see if it should be archived.\n for f in files:\n if f.split('.')[-1] in save_extensions:\n # Recreate the file structure inside experiment_dir, up to\n # the folder containing f\n tmp_save_dir = tmp_experiment if dtc == experiment_dir \\\n else os.path.join(tmp_experiment, dtc)\n os.makedirs(tmp_save_dir, exist_ok=True)\n # Save a copy of f\n shutil.copy2(os.path.join(full_dtc, f), tmp_save_dir)\n\n # Get non-excluded subdirectories\n subdirs = [d for d in dlist\n if os.path.isdir(os.path.join(full_dtc, d))\n and d not in exclude_dirs]\n # Track subdirectories as paths relative to the experiment dir\n if dtc != experiment_dir and len(subdirs) > 0:\n subdirs = [os.path.join(dtc, d) for d in subdirs]\n\n dirs_to_check += subdirs\n\n # At this point, all archivable files and folders are saved in tmp_folder.\n # Create an archive, coincidentally the same name as tmp_experiment's path\n tmp_archive = tmp_experiment[:]\n shutil.make_archive(tmp_archive, archive_format, tmp_folder, archive_name)\n # Get the full name of the archive. There should only be one file in\n # tmp_experiment\n tmp_archive_full = [f for f in os.listdir(tmp_folder)\n if os.path.isfile(os.path.join(tmp_folder, f))][0]\n # Copy the archive to its destination\n os.makedirs(dst_dir, exist_ok=True)\n shutil.move(os.path.join(tmp_folder, tmp_archive_full),\n os.path.join(dst_dir, tmp_archive_full),\n copy_function=shutil.copyfile)\n # Remove the temporary folder\n shutil.rmtree(tmp_folder)\n\n pass",
"def write_output_files(input_path, output_path, out_data, random = False):\n create_directory_structure(output_path)\n for city in cities:\n # set relevant list\n data_dir = os.path.join(input_path, city, city+'_test')\n sub_files = list_filenames(data_dir)\n for f in sub_files:\n # load data\n outfile = os.path.join(output_path, city, city+'_test',f)\n if random:\n out = np.random.randint(256, size=(5,3,495,436,3), dtype = np.dtype(np.uint8))\n else:\n out = out_data\n write_data(out, outfile)\n print(\"just wrote file {}\".format(outfile))",
"def compress_skim_dir(directory, output=\"zarr\"):\n\n if output not in (\"zarr\", \"zarr.zip\"):\n raise NotImplementedError(output)\n\n if output == \"zarr\":\n if not os.path.exists(directory+\".zarr\"):\n os.makedirs(directory+\".zarr\")\n elif output == \"zarr.zip\":\n if os.path.exists(directory+\".zarr.zip\"):\n raise FileExistsError(directory+\".zarr.zip\")\n\n master = {}\n for f in os.walk(directory):\n for fi in f[2]:\n if \".emx\" in fi:\n arr = np.fromfile(fi, dtype='f4')\n side = int(np.sqrt(arr.size))\n arr = arr.reshape(side, side)\n tazrange = pd.RangeIndex(1, side+1)\n master[fi.replace(\".emx\", \"\")] = xr.DataArray(\n arr,\n dims=['otaz', 'dtaz'],\n coords={'otaz': tazrange, 'dtaz': tazrange}\n )\n\n master = sh.Dataset(master)\n\n if output == \"zarr\":\n master.to_zarr(directory+\".zarr\", mode='a')\n elif output == \"zarr.zip\":\n with zarr.ZipStore(directory+\".zarr.zip\", mode='w') as store:\n master.to_zarr(store)\n return master",
"def create_temp_archive(case_dict):\n # ---------------------------------------------------------------------\n archive_temp_dir = \"{0}/archive_temp_dir\".format(case_dict[\"workdir\"])\n logger.debug(\"create_temp_archive %s\", archive_temp_dir)\n\n if not os.path.exists(archive_temp_dir):\n os.makedirs(archive_temp_dir)\n else:\n logger.info(\n \"ERROR archive_metadata archive_temp_dir already exists. exiting...\"\n )\n sys.exit(1)\n\n return archive_temp_dir",
"def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = \"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)",
"def sample_series_dirs():\n tmp_dir = tempfile.mkdtemp()\n # Extract Series\n os.mkdir(os.path.join(tmp_dir, \"series_dir\"))\n series_dir_series = os.path.join(tmp_dir, \"series_dir\")\n series_zip = os.path.join(ASSETS_DIR, 'series_dir_series.zip')\n with zipfile.ZipFile(series_zip, \"r\") as zip_ref:\n zip_ref.extractall(series_dir_series)\n # Extract Animes\n os.mkdir(os.path.join(tmp_dir, \"anime_dir\"))\n series_dir_anime = os.path.join(tmp_dir, \"anime_dir\")\n anime_zip = os.path.join(ASSETS_DIR, 'series_dir_anime.zip')\n with zipfile.ZipFile(anime_zip, \"r\") as zip_ref:\n zip_ref.extractall(series_dir_anime)\n\n yield [series_dir_series, series_dir_anime]\n shutil.rmtree(tmp_dir)",
"def test_create_daily_archives_non_daily_operator_files(self, *args):\n start_date = DateHelper().this_month_start\n\n file_path = \"path\"\n\n context = {\"version\": \"1\"}\n expected = [file_path]\n result = create_daily_archives(\n 1, \"10001\", self.ocp_provider_uuid, \"file\", \"path\", self.ocp_manifest_id, start_date, context=context\n )\n self.assertEqual(result, expected)",
"def _test_path(self, request, artifact_dir):\n self.test_path = artifact_dir / request.module.__name__ / request.node.name\n self.test_path.mkdir(parents=True, exist_ok=True)\n self.export_path = self.test_path / \"sample_processed.nii.gz\"",
"def _generate_examples(self, archive):\n\n for fname, fobj in archive:\n image_dir, image_file = os.path.split(fname)\n d = os.path.basename(image_dir)\n record = {'image': fobj, 'label': d}\n yield \"%s/%s\" % (image_file, d), record",
"def test_RandomDatasetGenerator_SampleZip(temp_dir: pathlib.Path):\n generator = make_dataset.RandomDatasetGenerator(\n start_time_seconds_since_epoch=time.mktime(\n time.strptime(\"1/1/2018\", \"%m/%d/%Y\")\n ),\n locations=[\"My House\", \"The Office\", \"A Restaurant\",],\n names=[\n \"Work\",\n \"Home\",\n \"Sleep\",\n \"Fun\",\n \"Commute to work\",\n \"Commute to home\",\n ],\n )\n\n generator.SampleZip(temp_dir / \"LC_export.zip\", 100)\n\n with zipfile.ZipFile(temp_dir / \"LC_export.zip\") as z:\n with z.open(\"LC_export.csv\") as f:\n # Read and decode the compressed CSV into a string.\n string = f.read().decode(\"utf-8\")\n reader = csv.reader(string.split(\"\\n\"))\n rows = [row for row in reader]\n\n # One line for the header.\n assert len(rows) == 103\n\n # All lines except the second and last have eight columns.\n assert len(rows[0]) == 8\n for row in rows[2:-1]:\n assert len(row) == 8",
"def _clean_up_temporary_files(dataset_dir):\n return",
"def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)",
"def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)",
"def generate_archive_file(location, paths, environment=None, compression=None, archive_format=None):\n if archive_format == 'zip':\n archive = ZipTarWrapper(location.name, 'w', zipfile.ZIP_DEFLATED)\n else:\n write_type = \"w\"\n if compression:\n write_type = \"w|{0}\".format(compression)\n archive = tarfile.open(location.name, write_type)\n\n # Add all the things to the archive\n for path_spec in paths:\n path_spec.add_to_tar(archive, environment)\n\n # Finish the zip\n archive.close()\n\n return archive",
"def create_test_input_files(input1, input2):\n random.shuffle(input1)\n random.shuffle(input2)\n filename1 = application.join_abs_path(EMPTY_TEST_DIR, 'file-1.gz')\n filename2 = application.join_abs_path(EMPTY_TEST_DIR, 'file-2.gz')\n\n with gzip.open(filename1, 'wb') as file1:\n file1.write('\\n'.join(input1))\n with gzip.open(filename2, 'wb') as file2:\n file2.write('\\n'.join(input2))"
] | [
"0.7358341",
"0.6791461",
"0.6418583",
"0.63508826",
"0.63468665",
"0.6301004",
"0.6291766",
"0.61660707",
"0.6096657",
"0.6091523",
"0.60853094",
"0.6035293",
"0.602757",
"0.59755576",
"0.595753",
"0.59322333",
"0.5913556",
"0.5871974",
"0.58634",
"0.5859327",
"0.58383375",
"0.579577",
"0.5780835",
"0.5780175",
"0.57778054",
"0.57703567",
"0.576039",
"0.57591206",
"0.5756717",
"0.5731769"
] | 0.75224614 | 0 |
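A short, hedged sketch of driving `generate_test_dataset_archive` from the record above for a single split. The path and the "mnist-mini" dataset key are made up for illustration, and `get_test_dataset`, `ensure_dir` and `idxgz` are assumed to exist exactly as used in that record.

# The function strips the file:// prefix itself and only writes the archive
# when the target file is missing, so repeating the call is a no-op.
filepath = "file:///tmp/collagen/mnist-mini/server/train-images-idx3-ubyte.gz"
generate_test_dataset_archive(filepath, "mnist-mini.train.images")
generate_test_dataset_archive(filepath, "mnist-mini.train.images")  # already on disk, skipped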
Generate a test environment using the given dataset. The settings are temporarily overwritten to use the test data. | def generate_test_environment(tmpdir, dataset):
# Overwrite settings with test settings
generate_test_settings(tmpdir, dataset)
# Generate the archive files
for usage in ['train', 'test']:
for dstype in ['images', 'labels']:
dataset_type = usage + '.' + dstype
mnist_dataset = 'datasets.mnist.' + dataset_type
filepath = get_setting(mnist_dataset)
test_dataset = dataset + '.' + dataset_type
generate_test_dataset_archive(filepath, test_dataset) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_environment(dataset, tmpdir):\n\n print(\">>> Test environment:\")\n print(\"dataset:\", dataset)\n print(\"tmpdir:\", tmpdir)\n\n generate_test_environment(tmpdir, dataset)\n\n return { 'dataset': dataset, 'tmpdir': tmpdir }",
"def test_generate_test_environment(dataset):\n\n print(\"## =========================================================\")\n print(\"## Dataset:\", dataset)\n print(\"## ---------------------------------------------------------\")\n print(\"\")\n\n tmpdir = \"/tmp/collagen\"\n\n generate_test_environment(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n \n # 'file:///some/path' to '/some/path'\n if filepath[:7] == 'file://':\n filepath = filepath[7:]\n\n # Unpack\n print(\"\")\n print(\"{}: {}\".format(mnist_dataset, filepath))\n print(\"\")\n data = idxgz.load(filepath)\n print(\"data:\", data)\n print(\"type:\", type(data))\n print(\"dtype:\", data.dtype)\n print(\"shape:\", data.shape)\n\n print(\"\")",
"def generate_test_settings(tmpdir, dataset):\n\n # When `tmpdir` is a path convert it to a string\n if isinstance(tmpdir, py._path.local.LocalPath):\n tmpdir = str(tmpdir)\n \n test_settings = {\n \n 'datasets': {\n 'mnist': {\n 'train': {\n 'images': \"file://\" + tmpdir + \"/\" + dataset + \"/server/train-images-idx3-ubyte.gz\",\n 'labels': \"file://\" + tmpdir + \"/\" + dataset + \"/server/train-labels-idx1-ubyte.gz\"\n },\n 'test': {\n 'images': \"file://\" + tmpdir + \"/\" + dataset + \"/server/t10k-images-idx3-ubyte.gz\",\n 'labels': \"file://\" + tmpdir + \"/\" + dataset + \"/server/t10k-labels-idx1-ubyte.gz\"\n },\n },\n },\n 'data-dir': tmpdir + \"/\" + dataset + \"/data\"\n }\n overwrite_settings(test_settings)",
"def run(\n dataset,\n setting\n ):\n \n log_setting = setting if setting else \"default\" \n logger.debug(\"Create setting '{0}' from dataset '{1}'\".format(log_setting, dataset))\n\n if dataset in expmgmt.config.settings.get_datasets():\n expmgmt.config.settings.set_dataset(\n dataset,\n setting\n )",
"def setUp(self):\n self.dataset = get_test_dataset()",
"def autogen_dataset_dir_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular',\n test_path='tests/data/dummy_tabular_test',\n seed=42,\n sep=',')",
"def autogen_dataset_with_test():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n test_path='tests/data/dummy_tabular_test/test.csv',\n seed=42,\n sep=',')",
"def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return",
"def _generate_datasets(self):\n\n degrade_test = False\n if self._opts['degrade_step'] == 'test':\n degrade_test = True\n\n use_trainset_for_tests = UseTrainForTest.IDENTICAL # can be different in few shot workflow\n\n train_dataset, test_dataset = self._gen_datasets_with_options(self._opts['train_classes'],\n self._opts['test_classes'],\n is_superclass=self._opts['superclass'],\n class_proportion=self._opts['class_proportion'],\n degrade_test=degrade_test,\n degrade_type=self._opts['degrade_type'], # only relevant if degrade_test = True\n degrade_val=self._opts['min_val'], # only relevant if degrade_test = True\n recurse_train=self._is_train_recursive(),\n recurse_test=self._is_inference_recursive(),\n num_batch_repeats=self._opts['num_repeats'],\n recurse_iterations=self._opts['recurse_iterations'],\n evaluate_step=self._opts['evaluate'],\n use_trainset_for_tests=use_trainset_for_tests,\n invert_images=self._opts['invert_images'],\n min_val=self._opts['min_val'])\n return train_dataset, test_dataset",
"def setUp(self):\n self.directory = tempfile.TemporaryDirectory()\n self.dataset = self.dataset_cls(cache_root=self.directory.name)",
"def setUp(self):\n self.dataset = self.dataset_cls()",
"def prepare_data(dataset, train_ratio=0.8, input_dim=None, seed=10):\n # Retrieve main path of project\n dirname = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n\n # Download and store dataset at chosen location\n if dataset == 'Cora' or dataset == 'PubMed' or dataset == 'Citeseer':\n path = os.path.join(dirname, 'data')\n data = Planetoid(path, name=dataset, split='full')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n # data.train_mask, data.val_mask, data.test_mask = split_function(data.y.numpy())\n # data = Planetoid(path, name=dataset, split='public', transform=T.NormalizeFeatures(), num_train_per_class=20, num_val=500, num_test=1000)\n\n elif dataset == 'Amazon':\n path = os.path.join(dirname, 'data', 'Amazon')\n data = Amazon(path, 'photo')[0]\n data.name = dataset\n data.num_classes = (max(data.y)+1).item()\n data.train_mask, data.val_mask, data.test_mask = split_function(\n data.y.numpy(), seed=seed)\n # Amazon: 4896 train, 1224 val, 1530 test\n \n elif dataset in ['syn1', 'syn2', 'syn4', 'syn5']: \n data = synthetic_data(\n dataset, dirname, train_ratio, input_dim)\n \n elif dataset == 'syn6':\n data = gc_data(dataset, dirname, train_ratio)\n\n elif dataset == 'Mutagenicity':\n data = gc_data(dataset, dirname, train_ratio)\n\n return data",
"def get_dataset(test_envs, args, hparams, algorithm_class=None):\n is_mnist = \"MNIST\" in args.dataset\n dataset = vars(datasets)[args.dataset](args.data_dir, test_envs)\n # if not isinstance(dataset, MultipleEnvironmentImageFolder):\n # raise ValueError(\"SMALL image datasets are not implemented (corrupted), for transform.\")\n\n in_splits = []\n out_splits = []\n for env_i, env in enumerate(dataset):\n # The split only depends on seed_hash (= trial_seed).\n # It means that the split is always identical only if use same trial_seed,\n # independent to run the code where, when, or how many times.\n out, in_ = split_dataset(\n env,\n int(len(env) * args.holdout_fraction),\n misc.seed_hash(args.trial_seed, env_i),\n )\n if env_i in test_envs:\n in_type = \"test\"\n out_type = \"test\"\n else:\n in_type = \"train\"\n out_type = \"valid\"\n\n if is_mnist:\n in_type = \"mnist\"\n out_type = \"mnist\"\n\n set_transfroms(in_, in_type, hparams, algorithm_class)\n set_transfroms(out, out_type, hparams, algorithm_class)\n\n if hparams[\"class_balanced\"]:\n in_weights = misc.make_weights_for_balanced_classes(in_)\n out_weights = misc.make_weights_for_balanced_classes(out)\n else:\n in_weights, out_weights = None, None\n in_splits.append((in_, in_weights))\n out_splits.append((out, out_weights))\n\n return dataset, in_splits, out_splits",
"def create_sandbox_dataset(project_id, dataset_id):\n sandbox_dataset_id = get_sandbox_dataset_id(dataset_id)\n friendly_name = f'Sandbox for {dataset_id}'\n description = f'Sandbox created for storing records affected by the cleaning rules applied to {dataset_id}'\n label_or_tag = {'label': '', 'tag': ''}\n create_dataset(project_id=project_id,\n dataset_id=sandbox_dataset_id,\n friendly_name=friendly_name,\n description=description,\n label_or_tag=label_or_tag,\n overwrite_existing=False)\n\n return sandbox_dataset_id",
"def __create_test_environment(self):\n os.chdir(self.wd)\n temp_dir = tempfile.gettempdir()\n self.test_root = os.path.join(temp_dir, \"test-grpc\")\n print(\"Creating testing environment in {}\".format(self.test_root))\n if os.path.exists(self.test_root):\n # delete any previous environment\n shutil.rmtree(self.test_root)\n # create root directory\n os.makedirs(self.test_root)\n def copy_app(name):\n app_root = os.path.join(self.test_root, name)\n os.makedirs(app_root)\n filename = \"grpc-{}\".format(name)\n src = os.path.join(self.args.bin, filename)\n dst = os.path.join(app_root, filename)\n shutil.copy(src, dst)\n return dst\n # copy client and server into the new test environment\n self.server_path = copy_app(\"server\")\n self.client_path = copy_app(\"client\")",
"def setUpTestData(cls):\n data_gen.run()",
"def setUpTestData(cls):\n data_gen.run()",
"def setUpTestData(cls):\n data_gen.run()",
"def setUpTestData(cls):\n data_gen.run()",
"def test_dataset_autogen(autogen_dataset):\n train_dummy = \"eget, venenatis a, magna. Lorem ipsum dolor sit amet, consectetuer\"\n val_dummy = \"leo. Vivamus nibh dolor, nonummy ac, feugiat non, lobortis quis,\"\n test_dummy = \"turpis egestas. Aliquam fringilla cursus purus. Nullam scelerisque neque sed\"\n\n assert autogen_dataset.train[0][0] == train_dummy\n assert autogen_dataset.train[0][1] == '8'\n assert len(autogen_dataset.train) == 64\n\n assert autogen_dataset.val[0][0] == val_dummy\n assert autogen_dataset.val[0][1] == '1'\n assert len(autogen_dataset.val) == 16\n\n assert autogen_dataset.test[0][0] == test_dummy\n assert autogen_dataset.test[0][1] == '6'\n assert len(autogen_dataset.test) == 20",
"def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx",
"def setUp(self):\n\n # Create a data pipe.\n self.interpreter.pipe.create('test', 'mf')\n\n # Create a temporary file name.\n ds.tmpfile = mktemp()",
"def setUp(self):\n\n # Create the data pipe.\n self.interpreter.pipe.create('dasha', 'mf')\n\n # Create a temporary directory for Dasha outputs.\n ds.tmpdir = mkdtemp()",
"def create_dataset():\n with open(\"/root/config.json\", \"r\") as f:\n config = json.load(f)\n\n # create environmental variables\n for (key, value) in config.items():\n os.environ[key] = str(value)\n\n # run blender\n command = '/usr/lib/blender/blender {} --python {} --background'.\\\n format(\"/root/models/default.blend\", \"/root/rendering.py\")\n os.system(command)\n\n # post processing\n post_processing()",
"def make_dataset():\n\n\tnumberOfTrials = dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")",
"def autogen_dataset():\n return TabularDataset.autogen('tests/data/dummy_tabular/train.csv',\n seed=42,\n sep=',')",
"def setup(self):\n # TODO check if need both dataset together\n self.train_dataset = ABSADataset(data_path=self.train_path, mode=self.in_mode, task=self.task, \n tokenizer=self.tokenizer, vocab=\"bert\")\n self.vocabulary = self.train_dataset.vocabulary\n\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=self.vocabulary)\n #self.train_restaurant = ABSADataset(data_path=RESTAURANT_TRAIN)\n #self.eval_restaurant = ABSADataset(data_path=RESTAURANT_DEV)",
"def setUpClass(cls):\n cls.checkpoint = 10\n cls.dataset = 'I-AM-A-TEST-DATASET'\n cls.experiment = 'I-AM-A-TEST-FOLDER'\n cls.tokenizer_parameters = {\n 'is_uncased': [False, True],\n 'tokenizer': [\n ('char_dict', lmp.tokenizer.CharDictTokenizer),\n ('char_list', lmp.tokenizer.CharListTokenizer),\n ('whitespace_dict', lmp.tokenizer.WhitespaceDictTokenizer),\n ('whitespace_list', lmp.tokenizer.WhitespaceListTokenizer),\n ],\n }\n cls.test_dir = os.path.join(lmp.path.DATA_PATH, cls.experiment)\n os.makedirs(cls.test_dir)",
"def setUp(self):\n\n self.test_data_path = 'testing/test_data/'",
"def main(unused_argv):\n del unused_argv\n if not os.path.exists(FLAGS.data_dir):\n os.makedirs(FLAGS.data_dir)\n\n tfds_cached_dict = {}\n data_dir = FLAGS.tfds_data_dir if FLAGS.tfds_data_dir else None\n name = FLAGS.dataset_name\n tfds_cached_dict[name] = tfds.load(name, batch_size=-1, data_dir=data_dir)\n dataset_dict = tfds_cached_dict[name]\n dataset_dict[tfds.Split.TRAIN] = tfds.as_numpy(\n dataset_dict[tfds.Split.TRAIN])\n dataset_dict[tfds.Split.TEST] = tfds.as_numpy(\n dataset_dict[tfds.Split.TEST])\n # To mock the API of tfds.load to cache the downloaded datasets.\n # Used as an argument to `get_dataset`.\n def load_fn(name, data_dir=None, batch_size=-1):\n # This function will always return the whole dataset.\n assert batch_size == -1\n del data_dir\n del batch_size\n return tfds_cached_dict[name]\n class_ids = sorted([int(x) for x in FLAGS.class_ids])\n num_classes = len(class_ids)\n for i in range(num_classes):\n for j in range(i+1, num_classes):\n print('Generating pos {} neg {}'.format(i, j))\n positive_class = class_ids[i]\n negative_class = class_ids[j]\n random_seeds = range(FLAGS.min_data_seed, FLAGS.max_data_seed)\n for seed in random_seeds:\n dataset = create_projected_binary_dataset(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.num_train_examples, FLAGS.num_valid_examples,\n FLAGS.num_test_examples, FLAGS.projected_dim, seed, load_fn)\n filename = 'binary_{}-pos_{}-neg_{}-dim_{}-seed_{}'.format(\n FLAGS.dataset_name, positive_class, negative_class,\n FLAGS.projected_dim, seed)\n serialized_dataset = dataset.SerializeToString()\n\n with open(os.path.join(FLAGS.data_dir, filename), 'wb') as f:\n f.write(serialized_dataset)"
] | [
"0.7599391",
"0.72667193",
"0.69813424",
"0.66441184",
"0.64025056",
"0.6268748",
"0.625678",
"0.6244715",
"0.61173195",
"0.60907793",
"0.60787404",
"0.60093623",
"0.5926749",
"0.5911557",
"0.5894079",
"0.58914727",
"0.58914727",
"0.58914727",
"0.58914727",
"0.58877015",
"0.5883739",
"0.5851698",
"0.58320737",
"0.58311224",
"0.57986146",
"0.5752506",
"0.57058454",
"0.5705715",
"0.56993276",
"0.56850374"
] | 0.7804764 | 0 |
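A pytest-style sketch of wiring `generate_test_environment` into a fixture, similar in spirit to the fixtures among the negatives above. It assumes the function and `get_setting` are importable; the fixture name, test name, and "mnist-mini" dataset are hypothetical.

import pytest

@pytest.fixture
def mnist_env(tmpdir):
    # pytest's tmpdir is a py.path.local object, which the settings helper converts to a string.
    generate_test_environment(tmpdir, "mnist-mini")
    return tmpdir

def test_train_images_archive_configured(mnist_env):
    path = get_setting("datasets.mnist.train.images")
    assert path.startswith("file://")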
Extracts (typically) overlapping regular patches from a grayscale image. Changing the offset and stride parameters will result in images reconstructed by reconstruct_from_grayscale_patches having different dimensions! Callers should pad and unpad as necessary! | def extract_grayscale_patches( img, shape, offset=(0,0), stride=(1,1) ):
px, py = np.meshgrid( np.arange(shape[1]),np.arange(shape[0]))
l, t = np.meshgrid(
np.arange(offset[1],img.shape[1]-shape[1]+1,stride[1]),
np.arange(offset[0],img.shape[0]-shape[0]+1,stride[0]) )
l = l.ravel()
t = t.ravel()
x = np.tile( px[None,:,:], (t.size,1,1)) + np.tile( l[:,None,None], (1,shape[0],shape[1]))
y = np.tile( py[None,:,:], (t.size,1,1)) + np.tile( t[:,None,None], (1,shape[0],shape[1]))
return img[y.ravel(),x.ravel()].reshape((t.size,shape[0],shape[1])), (t,l) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _extract_patches_and_positions_from_image(\n image, patch_size, patch_stride, hse_grid_size,\n n_crops, h, w,\n c, scale_id, max_seq_len):\n p = tf.image.extract_patches(\n image, [1, patch_size, patch_size, 1], [1, patch_stride, patch_stride, 1],\n [1, 1, 1, 1],\n padding='SAME')\n\n p = tf.reshape(p, [n_crops, -1, patch_size * patch_size * c])\n\n count_h = _ceil_divide_int(h, patch_stride)\n count_w = _ceil_divide_int(w, patch_stride)\n\n # Shape (num_patches, 1)\n spatial_p = get_hashed_spatial_pos_emb_index(hse_grid_size, count_h, count_w)\n # Shape (1, num_patches, 1)\n spatial_p = tf.expand_dims(spatial_p, axis=0)\n # Shape (n_crops, num_patches, 1)\n spatial_p = tf.tile(spatial_p, (n_crops, 1, 1))\n spatial_p = tf.cast(spatial_p, dtype=p.dtype)\n # Shape (n_crops, num_patches, 1)\n scale_p = tf.ones_like(spatial_p, dtype=p.dtype) * scale_id\n # Shape (n_crops, num_patches, 1)\n mask_p = tf.ones_like(spatial_p, dtype=p.dtype)\n\n # Concatenating is a hacky way to pass both patches, positions and input\n # mask to the model.\n # Shape (n_crops, num_patches, patch_size * patch_size * c + 3)\n out = tf.concat([p, spatial_p, scale_p, mask_p], axis=2)\n if max_seq_len >= 0:\n out = _pad_or_cut_to_max_seq_len(out, max_seq_len)\n out = tf.reshape(out,\n [n_crops, max_seq_len, c * patch_size * patch_size + 3])\n else:\n out = tf.reshape(out, [n_crops, -1, c * patch_size * patch_size + 3])\n return out",
"def get_patches(rimage, gimage, mimage, num_patches=48, patch_size=80, patch_stride=80):\n num_FSpatches = 16\n num_RApatches = 32\n rpatches = []\n gpatches = []\n mpatches = []\n #R_imgs = ((rimage+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'rainy.jpg', R_imgs[0,:,:,:])\n for i in range(int(math.sqrt(num_FSpatches))):\n for j in range(int(math.sqrt(num_FSpatches))):\n point_x = patch_stride*i\n point_y = patch_stride*j\n rpatch = rimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n #print(point_x)\n #print(point_y)\n #print(point_y+patch_size)\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d_%d.jpg'%(i,j), P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point_x):(point_x+patch_size), (point_y):(point_y+patch_size),:]\n mpatches.append(mpatch)\n\n for k in range(num_RApatches):\n point1 = random.randint(0,240) # 116 comes from the image source size (320) - the patch dimension (80)\n point2 = random.randint(0,240)\n #rpatch = tf.image.crop_to_bounding_box(rimage, point1, point2, patch_size, patch_size)\n rpatch = rimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n #P_imgs = ((rpatch+1)*127.5).astype(np.uint8)\n #scipy.misc.imsave('results'+'/' + 'patch_%d.jpg'%i, P_imgs[0,:,:,:])\n #print(rpatch.shape)\n rpatches.append(rpatch)\n #print(np.array(rpatches).shape)\n gpatch = gimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n gpatches.append(gpatch)\n mpatch = mimage[:,(point1):(point1+patch_size), (point2):(point2+patch_size),:]\n mpatches.append(mpatch)\n\n rpatches = np.array(rpatches)\n rpatches = np.squeeze(rpatches)\n #print(rpatches.shape)\n gpatches = np.array(gpatches)\n gpatches = np.squeeze(gpatches)\n mpatches = np.array(mpatches)\n mpatches = np.squeeze(mpatches)\n #assert rpatches.get_shape().dims == [num_patches, patch_size, patch_size, 3]\n assert rpatches.shape == (num_patches, patch_size, patch_size, 3)\n return rpatches, gpatches, mpatches",
"def recreate_from_patches(data):\n overlap_height = (PATCHES * PATCH_HEIGHT - IMG_HEIGHT) // (PATCHES - 1) # Overlap of patches along y axis\n step_size_height = PATCH_HEIGHT - overlap_height # Step size along y axis\n\n overlap_width = (PATCHES * PATCH_WIDTH - IMG_WIDTH) // (PATCHES - 1) # Overlap of patches along x axis\n step_size_width = PATCH_WIDTH - overlap_width # Step size along x axis\n\n whole_images = []\n i = 0\n while i < len(data):\n image = np.zeros((IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS)) # Create an empty image to pin patches on\n\n for h in range(PATCHES - 1):\n for w in range(PATCHES - 1):\n # Insert patches into image starting from top left corner, without the patches touching right or bottom border\n if h > 0: # First row has no overlap with patches above them\n if overlap_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_height], 0.5,\n data[i - PATCHES][step_size_height:], 0.5, 0)\n\n # Insert into patch where it overlaps\n rest = data[i][overlap_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n\n if w == PATCHES - 2: # If we are at the second to last patch, overlap may be calculated different\n i += 1\n continue\n\n else:\n i += 1\n if overlap_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_width)]], 0.5,\n data[i - 1][:,\n [i for i in range(PATCH_WIDTH - overlap_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert into next patch\n rest = data[i][:, [i for i in range(overlap_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch which touches right border on this height, may overlap more\n overlap_last_width = (PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n # Insert array of overlap into patch, where it overlaps\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2,\n h * step_size_height + PATCH_HEIGHT / 2))\n i += 1\n\n for w in range(PATCHES - 1):\n # Insert patches from the bottom border, may overlap more\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n # Insert patch into image\n image = insert_patch_subpixel(image, data[i], (w * step_size_width + PATCH_WIDTH / 2,\n IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n\n # Insert patch in the bottom right corner, may overlap more\n overlap_last_width = 
(PATCH_WIDTH + (PATCHES - 2) * step_size_width) - (IMG_WIDTH - PATCH_WIDTH)\n\n if overlap_last_width > 0:\n # Create array of overlap along x axis with mean values form overlapping patches\n mean_overlap_width = cv2.addWeighted(data[i][:, [i for i in range(0, overlap_last_width)]], 0.5,\n data[i - 1][:, [i for i in range(PATCH_WIDTH - overlap_last_width,\n PATCH_WIDTH)]], 0.5, 0)\n\n # Insert array of overlap into patch\n rest = data[i][:, [i for i in range(overlap_last_width, PATCH_WIDTH)]]\n data[i] = np.append(mean_overlap_width, rest, axis=1)\n\n overlap_last_height = (PATCH_HEIGHT + (PATCHES - 2) * step_size_height) - (IMG_HEIGHT - PATCH_HEIGHT)\n\n if overlap_last_height > 0:\n # Create array of overlap along y axis with mean values from overlapping patches\n mean_overlap_height = cv2.addWeighted(data[i][:overlap_last_height], 0.5,\n data[i - PATCHES][PATCH_HEIGHT - overlap_last_height:], 0.5, 0)\n\n # Insert array of overlap into patch where it overlaps\n rest = data[i][overlap_last_height:]\n data[i] = np.append(mean_overlap_height, rest, axis=0)\n\n image = insert_patch_subpixel(image, data[i], (IMG_WIDTH - PATCH_WIDTH / 2, IMG_HEIGHT - PATCH_HEIGHT / 2))\n i += 1\n whole_images.append(\n image) # All corresponding patches are pinned inside the image, therefore this image is finished\n\n return whole_images",
"def extract_patches(image, patchshape, overlap_allowed=0.1, cropvalue=None, crop_fraction_allowed=0.1):\r\n jump_cols = int(patchshape[1] * overlap_allowed)\r\n jump_rows = int(patchshape[0] * overlap_allowed)\r\n\r\n # Restrict ourselves to the rectangle containing non-cropped pixels\r\n if cropvalue is not None:\r\n rows, cols = np.where(image != cropvalue)\r\n rows.sort()\r\n cols.sort()\r\n active = image[rows[0]:rows[-1], cols[0]:cols[-1]]\r\n else:\r\n active = image\r\n\r\n rowstart = 0\r\n colstart = 0\r\n\r\n # Array tracking where we've already taken patches.\r\n covered = np.zeros(active.shape, dtype=bool)\r\n patches = []\r\n regions = []\r\n while rowstart <= active.shape[0] - patchshape[0]:\r\n # Record whether or not e've found a patch in this row,\r\n # so we know whether to skip ahead.\r\n got_a_patch_this_row = False\r\n colstart = 0\r\n while colstart <= active.shape[1] - patchshape[1]:\r\n # Slice tuple indexing the region of our proposed patch\r\n region = (slice(rowstart, rowstart + patchshape[0]),\r\n slice(colstart, colstart + patchshape[1]))\r\n\r\n # The actual pixels in that region.\r\n patch = active[region]\r\n\r\n # The current mask value for that region.\r\n cover_p = covered[region]\r\n if cropvalue is None or \\\r\n frac_eq_to(patch, cropvalue) <= crop_fraction_allowed and \\\r\n frac_eq_to(cover_p, True) <= overlap_allowed:\r\n # Accept the patch.\r\n patches.append(patch)\r\n regions.append(region)\r\n # Mask the area.\r\n covered[region] = True\r\n\r\n # Jump ahead in the x direction.\r\n colstart += jump_cols\r\n got_a_patch_this_row = True\r\n # print \"Got a patch at %d, %d\" % (rowstart, colstart)\r\n else:\r\n # Otherwise, shift window across by one pixel.\r\n colstart += 1\r\n\r\n if got_a_patch_this_row:\r\n # Jump ahead in the y direction.\r\n rowstart += jump_rows\r\n else:\r\n # Otherwise, shift the window down by one pixel.\r\n rowstart += 1\r\n\r\n # Return a 3D array of the patches with the patch index as the first\r\n # dimension (so that patch pixels stay contiguous in memory, in a\r\n # C-ordered array).\r\n return np.concatenate([pat[np.newaxis, ...] for pat in patches], axis=0),regions",
"def reconstruct_from_grayscale_patches( patches, origin, epsilon=1e-12 ):\n patch_width = patches.shape[2]\n patch_height = patches.shape[1]\n img_width = np.max( origin[1] ) + patch_width\n img_height = np.max( origin[0] ) + patch_height\n\n out = np.zeros( (img_height,img_width) )\n wgt = np.zeros( (img_height,img_width) )\n for i in range(patch_height):\n for j in range(patch_width):\n out[origin[0]+i,origin[1]+j] += patches[:,i,j]\n wgt[origin[0]+i,origin[1]+j] += 1.0\n\n return out/np.maximum( wgt, epsilon ), wgt",
"def image_to_patches(image, patch_size=8, overlap=False, is_mask=False):\n H, W = np.shape(image)\n num_patches = (\n (H - patch_size + 1) * (W - patch_size + 1)\n if overlap\n else int(H / patch_size) * int(W / patch_size)\n )\n patches = (\n np.zeros((patch_size ** 2, patch_size ** 2, num_patches))\n if is_mask\n else np.zeros((patch_size ** 2, num_patches))\n )\n overlap_step = 1 if overlap else patch_size\n count = 0\n for i in np.arange(H - patch_size + 1, step=overlap_step):\n for j in np.arange(W - patch_size + 1, step=overlap_step):\n if is_mask:\n patches[:, :, count] = np.diag(\n np.reshape(image[i : i + patch_size, j : j + patch_size], (-1))\n )\n else:\n patches[:, count] = np.reshape(image[i : i + patch_size, j : j + patch_size], (-1))\n count += 1\n return patches",
"def divide_image_to_patches(img, patch_size, stride=None):\n\n stride = stride or patch_size\n if not 0 < stride <= patch_size:\n raise ValueError(\n 'stride should be positive and smaller than or equal to patch_size')\n\n if len(img.shape) == 2: # this is a mask\n img = np.expand_dims(img, -1)\n\n height, width, n_channels = img.shape\n\n # Sometimes we need to extend the original image so that the sliding window\n # won't move out of the image\n ext_height, ext_width = _get_extended_image_size(\n height, width, patch_size, stride)\n ext_img = np.zeros((ext_height, ext_width, n_channels))\n ext_img[:height, :width] = img\n\n x = []\n\n for i in range(0, ext_height - patch_size + 1, stride):\n for j in range(0, ext_width - patch_size + 1, stride):\n x.append(ext_img[i:i + patch_size, j:j + patch_size, :])\n\n return np.array(x).astype('uint8')",
"def extract_patches(data,patch_dim):\n \n m = data.shape[0]\n im_x = data.shape[1]\n im_y = data.shape[2]\n \n assert im_x%float(patch_dim)==0 and im_y%float(patch_dim)==0, \\\n \"patch_size must divide x and y dimensions of image\"\n\n numpatchs = m*(im_x/patch_dim)*(im_y/patch_dim)\n patch_size = patch_dim**2\n\n patches = np.empty((patch_size,numpatchs))\n p=0\n for i in range(data.shape[0]):\n image = data[i,...]\n for x in np.r_[0:im_x:patch_dim]:\n for y in np.r_[0:im_y:patch_dim]:\n patch = image[x:x+patch_dim,y:y+patch_dim]\n patches[:,p] = patch.ravel()\n p+=1\n \n return patches",
"def create_patches_from_mask(image, mask, patchSize=32, pad=32, depth=1, searchSlices=None):\n rois = []\n images = []\n labels = []\n searchSlices = range(len(mask)) if searchSlices is None else searchSlices\n for i in searchSlices:\n # For each voxel, generate a ROI centered there\n if not np.any(mask[i]):\n continue\n xS, yS = np.nonzero(mask[i, :, :])\n xS -= xS % patchSize\n yS -= yS % patchSize\n allPatches = set(zip(xS, yS))\n for x, y in allPatches:\n patch = np.copy(\n # agafem el patch que ens interessa i agafem un contorn per si de cas (padding)\n # potser seria interessant reduir el padding (la quantitat de marge que deixem)\n # ara mateix tenim patches de 96, quan ens interessa el centre de 32 d'aquests\n image[i - depth: i + 1 + depth, x - pad:x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n label = np.copy(\n # quan fem rotacio al fer data augmentation, ens volem assegurar d'estar treballant amb\n # el mateix\n mask[i: i + 1, x - pad: x + patchSize + pad, y - pad:y + patchSize + pad]\n )\n\n rois.append(np.array([x, y, i]))\n images.append(patch)\n labels.append(label)\n return rois, images, labels",
"def divide_image_to_patches(img, patch_size):\n\n assert len(img.shape) == 3 and img.shape[-1] == 3\n\n height, width, n_channels = img.shape\n coordinates = _get_top_left_coordinates(height, width, patch_size)\n\n patches = []\n\n for top, left in coordinates:\n patches.append(img[top:top + patch_size, left:left + patch_size])\n\n return np.array(patches).astype('uint8')",
"def get_patches(image, label, coordmaps, sample, num_pos = 100, num_neg = 100, all_patches=False, patch_shape= (48,48,48), spacing=(24,24,24), start_idx = 0):\n image_shape = np.shape(image)\n cn_size = image_shape[0]\n sg_size = image_shape[1]\n cr_size = image_shape[2]\n ax_size = image_shape[3]\n\n if not all_patches:\n idx_pos = np.stack(np.where(label[0, ...] > 0))\n \n # Only include points not near boundary\n #sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n #idx_pos = idx_pos[:,sg_idx[0]]\n #cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n #idx_pos = idx_pos[:, cr_idx[0]]\n #ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n #idx_pos = idx_pos[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_pos[0].shape[0], num_pos, replace = False)\n cpts_pos_sampled = idx_pos[:, idx_rand] \n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n for i in range(num_pos):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + i)\n \n # For negative points\n idx_neg = np.stack(np.where(label[0, ...]==0), axis = 0)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx_pos[0]) & (idx_pos[0] < (sg_size - (patch_shape[0]/2))))\n idx_neg = idx_neg[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx_pos[1]) & (idx_pos[1] < (cr_size - (patch_shape[1]/2))))\n idx_neg = idx_neg[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx_pos[2]) & (idx_pos[2] < (ax_size - (patch_shape[2]/2))))\n idx_neg = idx_neg[:, ax_idx[0]]\n \n idx_rand = np.random.choice(idx_neg[0].shape[0], num_neg, replace = False)\n cpts_neg_sampled = idx_neg[:, idx_rand] \n \n for i in range(num_neg):\n idx1_sg = cpts_pos_sampled[0][i] - int(patch_shape[0]/2)\n idx1_cr = cpts_pos_sampled[1][i] - int(patch_shape[1]/2)\n idx1_ax = cpts_pos_sampled[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n #Write patch/image and control points to csv and save image\n write_patch_to_file(image_patch, 
label_patch, coordmaps_patch, sample, cpts_pos_sampled[:,i], start_idx + num_pos + i)\n \n cpts = np.concatenate((cpts_pos_sampled, cpts_neg_sampled), axis = 1)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, cpts, start_idx + num_pos + i\n\n else:\n \n idx = p.grid_center_points(image.shape[1:], spacing)\n \n # Only include points not near boundary\n sg_idx = np.where(((patch_shape[0]/2) < idx[0]) & (idx[0] < (sg_size - (patch_shape[0]/2))))\n idx = idx[:,sg_idx[0]]\n cr_idx = np.where(((patch_shape[1]/2) < idx[1]) & (idx[1] < (cr_size - (patch_shape[1]/2))))\n idx = idx[:, cr_idx[0]]\n ax_idx = np.where(((patch_shape[2]/2) < idx[2]) & (idx[2] < (ax_size - (patch_shape[2]/2))))\n idx = idx[:, ax_idx[0]]\n \n image_patch_list = []\n label_patch_list = []\n coordmaps_patch_list = []\n \n for i in range(idx.shape[1]):\n \n idx1_sg = idx[0][i] - int(patch_shape[0]/2)\n idx1_cr = idx[1][i] - int(patch_shape[1]/2)\n idx1_ax = idx[2][i] - int(patch_shape[2]/2)\n \n idx2_sg = idx1_sg + patch_shape[0]\n idx2_cr = idx1_cr + patch_shape[1]\n idx2_ax = idx1_ax + patch_shape[2]\n \n image_patch_orig = image[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n image_patch = p.standardize_image(image_patch_orig)\n #image_patch = p.Normalize(image_patch)\n label_patch = label[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n coordmaps_patch = coordmaps[:, idx1_sg:idx2_sg, idx1_cr:idx2_cr, idx1_ax:idx2_ax]\n \n image_patch_list.append(image_patch)\n label_patch_list.append(label_patch)\n coordmaps_patch_list.append(coordmaps_patch)\n \n return image_patch_list, label_patch_list, coordmaps_patch_list, idx, len(image_patch_list)",
"def img_to_patches(img, patch_size, stride, overlapping=True):\r\n h, w, _ = img.shape\r\n\r\n assert h == w, 'height should be equal to width ({} != {})'.format(h, w)\r\n assert overlapping or patch_size % stride == 0, 'cannot have non overlapping patches with {} % {} != 0' \\\r\n .format(patch_size, stride)\r\n assert (h - patch_size) % stride == 0, 'height - patch_size should be dividable by stride but {} % {} != 0' \\\r\n .format(h - patch_size, stride)\r\n\r\n n_stride = (h - patch_size) // stride + 1\r\n patches = []\r\n for i in range(n_stride):\r\n if overlapping or i * stride % patch_size == 0:\r\n for j in range(n_stride):\r\n if overlapping or j * stride % patch_size == 0:\r\n patch = img[i * stride: i * stride + patch_size, j * stride: j * stride + patch_size]\r\n patches.append(patch)\r\n return np.array(patches)",
"def patches_sampling(self, image, patch_size, stride):\n h, w = image.shape[2:4]\n patches = []\n for i in range(0, h - patch_size + 1, stride):\n for j in range(0, w - patch_size + 1, stride):\n patches.append(image[:, :, i:i + patch_size, j:j + patch_size])\n patches = torch.cat(patches, dim=0).to(self.device)\n return patches",
"def patches_to_img(patches, stride, img_shape):\r\n if len(img_shape) > 2:\r\n channels = [patches_to_img(patches[:, :, :, i], stride, img_shape[:2]) for i in range(3)]\r\n return np.concatenate(channels, axis=2)\r\n\r\n h, w = img_shape\r\n patch_size = patches.shape[1]\r\n n_stride = (h - patch_size) // stride + 1\r\n\r\n assert h == w, \"only squared image are accepted\"\r\n assert (h - patch_size) % stride == 0, \"The stride must be adapted on image and patch size\"\r\n assert len(patches) == n_stride ** 2, \"They must be the right number of patches per image\"\r\n\r\n pred_final = np.zeros(img_shape + (1,)) # Accumulator for the final prediction\r\n pred_normalizer = np.zeros(img_shape + (1,)) # Counter of the patch per prediction per pixel\r\n\r\n for i in range(n_stride):\r\n for j in range(n_stride):\r\n x_from, x_to = i * stride, i * stride + patch_size\r\n y_from, y_to = j * stride, j * stride + patch_size\r\n idx = i * n_stride + j\r\n pred_final[x_from: x_to, y_from: y_to] += patches[idx].reshape(patch_size, patch_size, 1)\r\n pred_normalizer[x_from: x_to, y_from: y_to] += 1\r\n return pred_final / pred_normalizer",
"def dense_patch_slices(image_size, patch_size, scan_interval):\n num_spatial_dims = len(image_size)\n if num_spatial_dims not in (2, 3):\n raise ValueError(\"image_size should has 2 or 3 elements\")\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = [\n int(math.ceil(float(image_size[i]) / scan_interval[i])) if scan_interval[i] != 0 else 1\n for i in range(num_spatial_dims)\n ]\n slices = []\n if num_spatial_dims == 3:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n\n for k in range(0, scan_num[2]):\n start_k = k * scan_interval[2]\n start_k -= max(start_k + patch_size[2] - image_size[2], 0)\n slice_k = slice(start_k, start_k + patch_size[2])\n slices.append((slice_i, slice_j, slice_k))\n else:\n for i in range(scan_num[0]):\n start_i = i * scan_interval[0]\n start_i -= max(start_i + patch_size[0] - image_size[0], 0)\n slice_i = slice(start_i, start_i + patch_size[0])\n\n for j in range(scan_num[1]):\n start_j = j * scan_interval[1]\n start_j -= max(start_j + patch_size[1] - image_size[1], 0)\n slice_j = slice(start_j, start_j + patch_size[1])\n slices.append((slice_i, slice_j))\n return slices",
"def dense_patch_slices(\n image_size: Sequence[int], patch_size: Sequence[int], scan_interval: Sequence[int], return_slice: bool = True\n) -> list[tuple[slice, ...]]:\n num_spatial_dims = len(image_size)\n patch_size = get_valid_patch_size(image_size, patch_size)\n scan_interval = ensure_tuple_size(scan_interval, num_spatial_dims)\n\n scan_num = []\n for i in range(num_spatial_dims):\n if scan_interval[i] == 0:\n scan_num.append(1)\n else:\n num = int(math.ceil(float(image_size[i]) / scan_interval[i]))\n scan_dim = first(d for d in range(num) if d * scan_interval[i] + patch_size[i] >= image_size[i])\n scan_num.append(scan_dim + 1 if scan_dim is not None else 1)\n\n starts = []\n for dim in range(num_spatial_dims):\n dim_starts = []\n for idx in range(scan_num[dim]):\n start_idx = idx * scan_interval[dim]\n start_idx -= max(start_idx + patch_size[dim] - image_size[dim], 0)\n dim_starts.append(start_idx)\n starts.append(dim_starts)\n out = np.asarray([x.flatten() for x in np.meshgrid(*starts, indexing=\"ij\")]).T\n if return_slice:\n return [tuple(slice(s, s + patch_size[d]) for d, s in enumerate(x)) for x in out]\n return [tuple((s, s + patch_size[d]) for d, s in enumerate(x)) for x in out] # type: ignore",
"def _extract_patches(img, patch_s):\n def np_extract_patches(img):\n orig = np.array(img.shape[:2])\n new = patch_s[0] * np.ceil(orig / patch_s[0]).astype(int)\n points = new - orig\n img = np.pad(img, [(0, points[0]), (0, points[1]), (0, 0)],\n mode='constant')\n patches = view_as_blocks(img, tuple(patch_s)).astype(np.float32)\n patches = patches.reshape(-1, *patch_s)\n return patches\n\n patches = tf.numpy_function(np_extract_patches, [img], tf.float32)\n return patches",
"def prepare_train_patches(images_path, labels_path, indices, patch_size, overlap, overlap_amount, aug_config):\n\n # Load images and labels\n images = extract_images(images_path, indices)\n labels = extract_images(labels_path, indices)\n\n # Get patches\n if overlap:\n image_patches = [patch for im in images for patch in patchify_overlap(im, patch_size, overlap_amount)]\n label_patches = [patch for label in labels for patch in patchify_overlap(label, patch_size, overlap_amount)]\n else:\n image_patches = [patch for im in images for patch in patchify(im, patch_size)]\n label_patches = [patch for label in labels for patch in patchify(label, patch_size)]\n \n if not aug_config:\n return image_patches, label_patches\n\n patches = zip(image_patches, label_patches)\n\n # Rotation needs to be applied on whole image\n if aug_config.do_rotation:\n images_rot = rotate_images(images, aug_config.rotation_angles)\n labels_rot = rotate_images(labels, aug_config.rotation_angles)\n\n for im, label in zip(images_rot, labels_rot):\n p = patchify_no_corner(im, label, patch_size, overlap, overlap_amount)\n image_patches.extend(p[0])\n label_patches.extend(p[1])\n\n # Flip each patch horizontally\n images_flipped = []\n labels_flipped = []\n if aug_config.do_flip:\n flip_hor = iaa.Fliplr(0.5).to_deterministic()\n flip_ver = iaa.Flipud(0.5).to_deterministic()\n images_flipped.extend(flip_hor.augment_images(image_patches))\n images_flipped.extend(flip_ver.augment_images(image_patches))\n labels_flipped.extend(flip_hor.augment_images(label_patches))\n labels_flipped.extend(flip_ver.augment_images(label_patches))\n\n image_patches.extend([im.copy() for im in images_flipped])\n label_patches.extend([im.copy() for im in labels_flipped])\n\n # For all the patches (even new ones), augment channels\n if aug_config.augment_channels:\n image_patches = augment_channels(image_patches, aug_config)\n\n return image_patches, label_patches",
"def extract_patch(n, patch_size, imgs):\n # Extract patches from input images\n img_patches = [img_crop(imgs[i], patch_size, patch_size) for i in range(n)]\n #gt_patches = [img_crop(gt_imgs[i], patch_size, patch_size) for i in range(n)]\n\n # Linearize list of patches\n img_patches = np.asarray([img_patches[i][j] for i in range(len(img_patches)) for j in range(len(img_patches[i]))])\n #gt_patches = np.asarray([gt_patches[i][j] for i in range(len(gt_patches)) for j in range(len(gt_patches[i]))])\n \n return img_patches #,gt_patches",
"def combine_patches_to_image(y_pred, img, stride):\n\n counter = 0\n height, width = img.shape[:2]\n output_size = y_pred.shape[1]\n\n # The last channel is the number of overlapping patches for a given pixel,\n # used for averaging predictions from multiple windows.\n combined = np.zeros((height, width, y_pred.shape[-1] + 1))\n\n for i in range(0, height - output_size + 1, stride):\n for j in range(0, width - output_size + 1, stride):\n patch = combined[i:i + output_size, j:j + output_size, :-1]\n overlaps = combined[i:i + output_size, j:j + output_size, -1:]\n patch = (patch * overlaps + y_pred[counter]) / (overlaps + 1)\n combined[i:i + output_size, j:j + output_size, :-1] = patch\n overlaps += 1.\n counter += 1\n\n return combined[:height, :width, :-1]",
"def get_patches(image_mat, stride):\n window_shape = (128, 128, 3)\n windows = view_as_windows(image_mat, window_shape, step=stride)\n patches = []\n for m in range(windows.shape[0]):\n for n in range(windows.shape[1]):\n patches += [windows[m][n][0]]\n return patches",
"def img_to_patches(img, win, stride=1):\n k = 0\n endc = img.shape[0]\n endw = img.shape[1]\n endh = img.shape[2]\n if endw<win or endh<win:\n return np.zeros([endc,win,win,0])\n patch = img[:, 0:endw-win+0+1:stride, 0:endh-win+0+1:stride]\n total_pat_num = patch.shape[1] * patch.shape[2]\n res = np.zeros([endc, win*win, total_pat_num], np.float32)\n for i in range(win):\n for j in range(win):\n patch = img[:, i:endw-win+i+1:stride, j:endh-win+j+1:stride]\n res[:, k, :] = np.array(patch[:]).reshape(endc, total_pat_num)\n k = k + 1\n return res.reshape([endc, win, win, total_pat_num])",
"def apply_patch_on_the_image(img, patch, count=5, offset=150):\n mask = np.zeros(shape=img.shape)\n boxes = []\n prev = (0, 0)\n gen = gencoordinates(img.shape[0], img.shape[1])\n for i in range(count):\n rnd = random.choice([x for x in range(100)])\n x_offset = rnd + patch.shape[0]\n y_offset = rnd + patch.shape[1]\n x_offset += prev[0]\n y_offset += prev[1]\n if y_offset < patch.shape[1]:\n y_offset = patch.shape[1]\n if x_offset < patch.shape[0]:\n x_offset = patch.shape[0]\n img[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = patch\n mask[y_offset:y_offset+patch.shape[0], x_offset:x_offset+patch.shape[1]] = 1\n boxes.append((y_offset, patch.shape[0], x_offset, patch.shape[1]))\n prev = (x_offset, y_offset)\n return img, mask, boxes",
"def extract_patches_single_scale(\n patch_size: int,\n stride: int,\n image_lt: lt.LabeledTensor,\n name: str = None,\n) -> Tuple[np.ndarray, lt.LabeledTensor]:\n with tf.compat.v1.name_scope(name, \"extract_patches_single_scale\", [image_lt]) as scope:\n image_lt = lt.transpose(image_lt, [\"batch\", \"row\", \"column\", \"channel\"])\n image_lt = tensorcheck.bounds(0.0, 1.0, image_lt)\n\n logging.info(\"extract_patches_single_scale: Input axes: %s\", image_lt.axes)\n\n batch_size = len(image_lt.axes[\"batch\"])\n num_rows = len(image_lt.axes[\"row\"])\n num_columns = len(image_lt.axes[\"column\"])\n\n row_offsets = range(0, num_rows - patch_size + 1, stride)\n if not row_offsets:\n raise ValueError(\"num_rows - patch_size + 1 must be >= 1\")\n expected_num_rows = _num_extracted_rows_and_columns(num_rows, patch_size,\n stride, 1, 2)\n assert len(row_offsets) == expected_num_rows, (len(row_offsets),\n expected_num_rows,\n (num_rows, patch_size,\n stride))\n\n column_offsets = range(0, num_columns - patch_size + 1, stride)\n assert column_offsets\n expected_num_columns = _num_extracted_rows_and_columns(\n num_columns, patch_size, stride, 1, 2)\n assert len(column_offsets) == expected_num_columns, (len(column_offsets),\n expected_num_columns,\n (num_rows, patch_size,\n stride))\n\n offsets = [(r, c) for r in row_offsets for c in column_offsets]\n\n patch_lts = []\n for b in range(batch_size):\n for (row, column) in offsets:\n patch_lt = lt.slice(\n image_lt, {\n \"batch\": slice(b, b + 1),\n \"row\": slice(row, row + patch_size),\n \"column\": slice(column, column + patch_size)\n })\n patch_lts.append(patch_lt)\n\n pack_lt = lt.concat(patch_lts, \"batch\")\n reshape_lt = lt.reshape(pack_lt, [\"batch\"], [\n image_lt.axes[\"batch\"], (\"patch_row\", len(row_offsets)),\n (\"patch_column\", len(column_offsets))\n ])\n\n reshape_lt = tensorcheck.shape(reshape_lt)\n reshape_lt = tensorcheck.bounds(0.0, 1.0, reshape_lt, name=scope)\n\n centers = [\n (r + patch_size / 2.0, c + patch_size / 2.0) for (r, c) in offsets\n ]\n\n logging.info(\"extract_patches_single_scale: Output axes: %s\",\n reshape_lt.axes)\n\n return np.array(centers), reshape_lt",
"def get_patches_non_overlap(array, patch_height, patch_width): \n total_patches_in_height = array.shape[0]//patch_height\n total_patches_in_width = array.shape[1]//patch_width\n # print(\"total patches in height from supplied image array : {}\".format(total_patches_in_height))\n # print(\"total patches in width from supplied image array : {}\".format(total_patches_in_width))\n \n total_patches = total_patches_in_height * total_patches_in_width\n # print(\"total patches from supplied image array : {}\".format(total_patches))\n patches = np.empty(shape=(total_patches, 1, patch_height, patch_width), dtype=np.uint8)\n \n patch_no = 0\n for i in range(0, array.shape[0], patch_height):\n for j in range(0, array.shape[1], patch_width):\n if (i+patch_height <= array.shape[0]+1) and (j+patch_width <= array.shape[1]+1):\n patches[patch_no, 0, :, :] = array[i:i+patch_height, j:j+patch_width]\n patch_no += 1\n return patches",
"def generate_patches_from_img(img, patch_size=128):\n\n new_width, new_height, channels = img.shape\n\n if img.shape[0] % 128 != 0:\n new_width = img.shape[0] + (128 - img.shape[0] % 128)\n\n if img.shape[1] % 128 != 0:\n new_height = img.shape[1] + (128 - img.shape[1] % 128)\n\n resized_img = resize(img, (new_width, new_height))\n\n block_shape = (128, 128, 3)\n img_blocks = view_as_blocks(resized_img, block_shape=block_shape)\n\n img_patches = {}\n\n for r in range(img_blocks.shape[0]):\n for c in range(img_blocks.shape[1]):\n img = img_blocks[r, c]\n img = np.reshape(img, (128, 128, 3))\n img_patches[(r, c)] = img\n\n return img_patches",
"def extract_patches(image_list, mask_src, image_src, mask_dst, image_dst, patch_size):\n class_counts = defaultdict(lambda: 0)\n skipped = 0\n total = 0\n for im in tqdm(image_list):\n img = cv2.imread(os.path.join(image_src, im))\n msk = cv2.imread(os.path.join(mask_src, im), 0)\n \n assert (img.shape[0] == msk.shape[0]) \\\n and (img.shape[1] == msk.shape[1]), \"Mismatch!\"\n\n img_patches = patchify(img, (patch_size, patch_size, 3), step=patch_size)\n msk_patches = patchify(msk, (patch_size, patch_size), step=patch_size)\n img_patches = img_patches.reshape((-1, patch_size, patch_size, 3))\n msk_patches = msk_patches.reshape((-1, patch_size, patch_size))\n # Step = 256 for patch size means no overlap\n for i in range(img_patches.shape[0]):\n # Replace class labels\n mask_patch = replace_classes(msk_patches[i])\n unique, counts = np.unique(mask_patch, return_counts=True)\n # If outside of RoI takes > 90% and there is only 1 class, ignore the patch.\n outside = np.mean(mask_patch == 0) > 0.9\n if outside and (len(unique) < 2):\n skipped += 1\n continue\n for x, y in enumerate(unique):\n class_counts[y] += counts[x].item()\n img_patch = img_patches[i]\n filename = im.split(\".png\")[0] + \"_\" + str(i) + \".png\"\n cv2.imwrite(os.path.join(image_dst, filename), img_patch)\n cv2.imwrite(os.path.join(mask_dst, filename), mask_patch)\n total += 1\n print('Skipped: {} / {}'.format(skipped, total))\n return class_counts",
"def extract_patch_from_img(array, patch_index, patch_size, z_offset=0, mean=None, std=None):\n patch_index[0] -= z_offset\n patch_index[1] -= z_offset\n\n z, x, y = array.shape\n ww = [patch_size[0], patch_size[1], patch_size[2]]\n\n ret = np.zeros(ww)\n temp_patch_index = np.array(patch_index).copy()\n ww = [0, patch_size[0], 0, patch_size[1], 0, patch_size[2]]\n\n # if patch overlaps image boundry (needs 0 padding) offset image index\n if temp_patch_index[0] < 0:\n ww[0] -= temp_patch_index[0]\n temp_patch_index[0] = 0\n if temp_patch_index[2] < 0:\n ww[2] -= temp_patch_index[2]\n temp_patch_index[2] = 0\n if temp_patch_index[4] < 0:\n ww[4] -= temp_patch_index[4]\n temp_patch_index[4] = 0\n\n if temp_patch_index[1] > z:\n ww[1] -= temp_patch_index[1] - z\n temp_patch_index[1] = z\n if temp_patch_index[3] > x:\n ww[3] -= temp_patch_index[3] - x\n temp_patch_index[3] = x\n if temp_patch_index[5] > y:\n ww[5] -= temp_patch_index[5] - y\n temp_patch_index[5] = y\n if temp_patch_index[0] >= temp_patch_index[1]:\n temp_patch_index[0] = temp_patch_index[1] - 1\n\n insert = array[temp_patch_index[0]:temp_patch_index[1],\n temp_patch_index[2]:temp_patch_index[3],\n temp_patch_index[4]:temp_patch_index[5]]\n\n # normalize patch\n if not (mean is None or std is None):\n insert = np.divide(insert - mean, std)\n\n ret[ww[0]:ww[1], ww[2]:ww[3], ww[4]:ww[5]] = insert\n\n return ret",
"def extract_image_patches(images, ksizes, strides, rates, padding='same'):\n assert len(images.size()) == 4\n assert padding in ['same', 'valid']\n batch_size, channel, height, width = images.size()\n\n if padding == 'same':\n images = same_padding(images, ksizes, strides, rates)\n elif padding == 'valid':\n pass\n else:\n raise NotImplementedError('Unsupported padding type: {}.\\\n Only \"same\" or \"valid\" are supported.'.format(padding))\n\n unfold = torch.nn.Unfold(kernel_size=ksizes,\n dilation=rates,\n padding=0,\n stride=strides)\n patches = unfold(images)\n return patches # [N, C*k*k, L], L is the total number of such blocks",
"def iter_patch(\n arr: NdarrayOrTensor,\n patch_size: Sequence[int] | int = 0,\n start_pos: Sequence[int] = (),\n overlap: Sequence[float] | float = 0.0,\n copy_back: bool = True,\n mode: str | None = NumpyPadMode.WRAP,\n **pad_opts: dict,\n) -> Generator[tuple[NdarrayOrTensor, np.ndarray], None, None]:\n\n from monai.transforms.croppad.functional import pad_nd # needs to be here to avoid circular import\n\n # ensure patchSize and startPos are the right length\n patch_size_ = get_valid_patch_size(arr.shape, patch_size)\n start_pos = ensure_tuple_size(start_pos, arr.ndim)\n\n # set padded flag to false if pad mode is None\n padded = bool(mode)\n is_v = [bool(p) for p in ensure_tuple_size(patch_size, arr.ndim)] # whether a valid patch size provided\n _pad_size = tuple(p if v and padded else 0 for p, v in zip(patch_size_, is_v)) # pad p if v else 0\n _overlap = [op if v else 0.0 for op, v in zip(ensure_tuple_rep(overlap, arr.ndim), is_v)] # overlap if v else 0.0\n # pad image by maximum values needed to ensure patches are taken from inside an image\n if padded:\n arrpad = pad_nd(arr, to_pad=[(p, p) for p in _pad_size], mode=mode, **pad_opts) # type: ignore\n # choose a start position in the padded image\n start_pos_padded = tuple(s + p for s, p in zip(start_pos, _pad_size))\n\n # choose a size to iterate over which is smaller than the actual padded image to prevent producing\n # patches which are only in the padded regions\n iter_size = tuple(s + p for s, p in zip(arr.shape, _pad_size))\n else:\n arrpad = arr\n start_pos_padded = start_pos\n iter_size = arr.shape\n\n for slices in iter_patch_slices(iter_size, patch_size_, start_pos_padded, _overlap, padded=padded):\n # compensate original image padding\n if padded:\n coords_no_pad = tuple((coord.start - p, coord.stop - p) for coord, p in zip(slices, _pad_size))\n else:\n coords_no_pad = tuple((coord.start, coord.stop) for coord in slices)\n yield arrpad[slices], np.asarray(coords_no_pad) # data and coords (in numpy; works with torch loader)\n\n # copy back data from the padded image if required\n if copy_back:\n slices = tuple(slice(p, p + s) for p, s in zip(_pad_size, arr.shape))\n arr[...] = arrpad[slices] # type: ignore"
] | [
"0.69615763",
"0.6910621",
"0.6902872",
"0.6756077",
"0.6684931",
"0.6669656",
"0.66367173",
"0.66181797",
"0.6613878",
"0.6597331",
"0.65837264",
"0.65633184",
"0.65623057",
"0.652115",
"0.6456871",
"0.6398756",
"0.6369198",
"0.6359843",
"0.63577175",
"0.6351932",
"0.6345157",
"0.63389385",
"0.6328715",
"0.6301238",
"0.628669",
"0.6277554",
"0.6260962",
"0.62547845",
"0.6247685",
"0.62453574"
] | 0.7973168 | 0 |
assert json schema for requests from api.openweathermap.org | def validate_schema_openweathermap(self, actual, schema):
resources_dir = os.path.abspath(os.getcwd())
relative_schema_path = valid_json_schema if schema == 'Valid' else error_json_schema
schema_data = open(os.path.join(resources_dir, relative_schema_path))
self.validate_schema(actual, json.load(schema_data))
return self | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('title', data)\n self.assertIn('release_year', data)\n self.assertIn('casting', data)\n self.assertIn('directors', data)\n self.assertIn('producers', data)\n self.assertIn('roman_release_year', data)",
"def test_trucks_api(self):\n resp = self.app.get('/trucks')\n self.assertEqual(resp.status_code, 200)\n\n # ensure proper JSON is returned\n data = json.loads(resp.data)\n assert 'resp' in data\n for item in data['resp']:\n # address is not actually required\n assert 'name' in item\n assert 'fooditems' in item\n assert 'latitude' in item\n assert 'longitude' in item\n assert 'schedule' in item",
"def check_valid_schema(context):\n data = context.response.json()\n validate_schema(data)",
"def test_complete_data_schema(self):\n response = self.client.get(self.url)\n data = response.data\n self.assertIn('id', data)\n self.assertIn('first_name', data)\n self.assertIn('last_name', data)\n self.assertIn('aliases', data)\n self.assertIn('movies_as_actor', data)\n self.assertIn('movies_as_director', data)\n self.assertIn('movies_as_producer', data)",
"def test_parse_weather_weather_simple_json(self):\n\n # Parse the data.\n actual = timeseries.parse_weather(self.weather_simple)\n\n # Ensure actual and expected results are equal.\n pd.testing.assert_frame_equal(actual, self.weather_simple_expected)",
"def testSchemaLoadingAsString(self):\n api = self.ApiFromDiscoveryDoc('latitude.v1.json')\n self.assertEquals(4, len(api._schemas))",
"def test_api_schema(self):\n response = self.client.get(\"/api/schema/\")\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.get(\"Content-Type\"), \"application/vnd.oai.openapi; charset=utf-8\"\n )\n self.assertEqual(\n response.get(\"Content-Disposition\"), 'inline; filename=\"Marsha API.yaml\"'\n )",
"def validate_json(self):\n pass",
"def test_meta_data_okay(self):\n self.expect_json_http({\"some\": \"value\"},\n uri=re.compile(\".*/articles/1234-56\"))\n\n self.assertEqual({\"some\": \"value\"},\n federalregister.meta_data(\"1234-56\"))",
"def test_simple2(self):\n api = self.load_api_description('simple2.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)",
"def test_functional_good_ip(self, url):\n response = requests.get(\"http://localhost:80/ip2w/{url}\".format(url=url))\n if response.status_code != BAD_GATEWAY:\n print(\"\\nGATEWAY is OK\")\n self.assertEqual(response.status_code, OK)\n content = response.json()\n self.assertEqual(len(content), 3)\n self.assertTrue(content.get(\"temp\"))\n self.assertTrue(content.get(\"city\"))\n else:\n print(\"\\nGATEWAY is RESET BY PEER\")",
"def test_simple4(self):\n api = self.load_api_description('simple4.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 2)\n\n resource = api.resources[1]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n self.assertEqual(len(output.headers), 1)\n header = output.headers[0]\n self.assertEqual(header.name, 'Location')\n self.assertEqual(header.type.type.get_reference_name(), 'href')\n self.assertEqual(header.type.ref, 'Order')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)\n\n resource = api.resources[0]\n self.assertEqual(len(resource.input_bindings), 1)\n self.assertEqual(resource.input_bindings[0].id, 'orderIdBinding')\n self.assertEqual(len(resource.operations), 2)\n self.assertEqual(resource.operations[0].input.params[0].binding, 'orderIdBinding')\n self.assertEqual(resource.operations[1].input.params[0].binding, 'orderIdBinding')",
"def check_schema(self, response):\n self.assertEqual(response.status_code, http.client.OK)\n result = response.json()\n url = response.links['schema']['url']\n try:\n schema = self.schemas[url]\n except KeyError:\n r = self.GET(url)\n self.assertEqual(r.status_code, http.client.OK)\n schema = r.json()\n self.schemas[url] = schema\n self.validate_schema(result, schema)\n return result",
"def test_no_input(self):\n resp = SearchTest.client.get('/api/search/')\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. Please check the API documentation for the appropriate input format!!\",\"No Input Test Error\")",
"def decide_schema(self, json_data):\n pass",
"def decide_schema(self, json_data):\n pass",
"def test_schema_invalid_json(self):\n schema_0_input = schema_nested_2_invalid_JSON\n\n # if you uncomment this line:\n # schema_0_input = schema_nested_2\n # this will fail the test: Failed: DID NOT RAISE <class 'simplejson.scanner.JSONDecodeError'>\n # because this is a valid schema\n\n with pytest.raises(simplejson.scanner.JSONDecodeError):\n msg = singer.parse_message(schema_0_input)",
"def test__import_api_v7(self):\n response = textwrap.dedent(\n \"\"\"\\\n const apiSchema = [\n {\n \"info\" : {\n }\n }\n ]\n ;\n \"\"\"\n )\n self._test__import_api(response)",
"def test_valid_analysis_request(analysis_request_dict: JSONDict) -> None:\n\n request = AnalysisRequest(**analysis_request_dict)\n\n assert request.dict() == analysis_request_dict",
"def test_validate_schema():\n data = {\n 'business': {\n 'cacheId': 1,\n 'foundingDate': '2007-04-08T00:00:00+00:00',\n 'identifier': 'CP1234567',\n 'legalName': 'legal name CP1234567'\n },\n }\n\n is_valid, _ = validate(data, 'business', validate_schema=True)\n\n assert is_valid",
"def test_validation_get_order_schema(self):\n self.assertIsInstance(api.validation.fetch_order_schema(), dict)",
"def test_validate_json_validates_schema(self):\n invalid_schema = {\"type\": \"any\"}\n valid_json = {}\n test_model = RecordSchema(schema=invalid_schema)\n\n with self.assertRaises(jsonschema.exceptions.SchemaError):\n test_model.validate_json(valid_json)",
"def test_api_hackernews_post_topstories_comments_no_json_data(client):\n response = client.post(\n \"/api/hackernews/topstories/123456789/comments\",\n )\n response = json.loads(response.data)\n assert (\n {\n '_schema': ['Invalid input type.'],\n\n }\n ) == response",
"def test_simple3(self):\n api = self.load_api_description('simple3.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 2)\n\n resource = api.resources[1]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 2)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n self.assertEqual(api.base[1], 'https://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 4)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)\n\n operation = api.resources[0].operations[0]\n self.assertEqual(len(operation.errors), 2)",
"def test_minimum_args(self) -> None:\n schema = JSONSchema()\n self.assertIsInstance(schema.schema, str)\n self.assertIsNone(schema.title)\n self.assertIsNone(schema.description)",
"def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)",
"def test_json() -> Response:\n response = requests.get('https://httpbin.org/json')\n return jsonify(response.json())",
"def test_get_json_spec(self):\n pass",
"def test_input_schema(self, data, errors):\n resp = self.client.post(self.url, json=data)\n\n if not errors:\n assert resp.status_code == 200\n assert resp.get_json() == {\n 'status': 'OK',\n 'message': 'Data published via Upload service',\n }\n else:\n assert resp.status_code == 400\n assert resp.get_json() == {\n 'status': 'Error',\n 'message': 'Input payload validation failed',\n 'errors': {\n k: ['Missing data for required field.'] for k in errors\n },\n }",
"def test_openapi_schema(app, client):\n response = client.get(\"/swagger/\")\n assert response.status_code == 200\n assert len(json.loads(response.data)[\"paths\"]) > 0"
] | [
"0.62621653",
"0.6240114",
"0.6214563",
"0.6125648",
"0.5942485",
"0.5931073",
"0.575438",
"0.57441986",
"0.57291126",
"0.57290787",
"0.57119447",
"0.56926596",
"0.5668845",
"0.5663258",
"0.5649895",
"0.5649895",
"0.5649045",
"0.56264323",
"0.5599427",
"0.5597548",
"0.5595037",
"0.55873364",
"0.5579084",
"0.55725527",
"0.55676025",
"0.55456424",
"0.5538711",
"0.55208063",
"0.5520564",
"0.5511551"
] | 0.6302012 | 0 |
Count the number of nonempty dicts/lists or other objects | def recursive_count(o):
if isinstance(o, dict):
c = 0
for v in o.values():
c += recursive_count(v)
return c
elif isinstance(o, list):
c = 0
for v in o:
c += recursive_count(v)
return c
else:
return 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def num_empty(self):\n count = 0\n for i in self.__buckets:\n if i.size() == 0:\n count += 1\n return count",
"def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs",
"def count(self):\n return len([i for i in self.iteritems()])",
"def cnt(iterable):\n return len(iterable) if iterable is not None else 0",
"def get_types_count():\n return len(type_dict.keys())",
"def none_count(d):\n return six.moves.reduce(lambda x, y: x + 1 if y == None else x, d.values(), 0)",
"def get_dict_data_len(x_dict: Dict[Any, Collection]):\n return check_all_same_length(*x_dict.values())",
"def __len__(self):\n return len(self._dict)",
"def __len__(self):\n return len(self._dict)",
"def __len__(self) -> int:\n return len(self._dict)",
"def size(self):\n return dict.__len__(self)",
"def __len__(self):\n return len(self.data.keys())",
"def __len__(self):\n return self._data_dict.__len__()",
"def count_dictionary_values(self):\n my_dictionary = {'A': [1, 2, 3, 4, 5, 6, 7, 8, 9],\n 'B': 34,\n 'C': 12,\n 'D': [7, 8, 9, 6, 4]}\n\n count = 0\n for key, value in my_dictionary.items():\n if isinstance(value, list):\n count += len(value)\n print(\"Number of items in a dictionary value i.e a list :\", count)",
"def count(item):\n return len(item)",
"def Counts(dict_of_list):\n return {k: len(v) for k, v in dict_of_list.iteritems()}",
"def _size_of_dict(dictionary):\n size = len(dictionary.keys())\n for value in dictionary.values():\n size += len(value)\n return size",
"def nb_objects(self) -> int:\n return 0",
"def count(self):\n return len(self.objects)",
"def size(matrix):\n size = 0\n for _,row in matrix.items():\n #size += len([r for r in row.values() if r != None])\n for _,v in row.items():\n #print(\"V:\",v)\n size += 1 if v != None else 0\n return size",
"def __len__(self):\n return sum(item['cantidad'] for item in self.carro.values())",
"def __len__(self):\n return sum(item['qty'] for item in self.basket.values()) # counts all the values of the key qty",
"def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count",
"def count(self):\n return len(self)",
"def size(self):\n return len(LongObjectHashMap.self)",
"def size(self):\n return len(LongObjectHashMap.self)",
"def get_num_animals():\n return jsonify(len(list(rd.keys(\"*\"))))",
"def _dict_values_count_hashed(a_dict, count_this):\n counter = 0\n for value in a_dict.values():\n if value == count_this:\n counter += 1\n elif (\n isinstance(value, dict)\n and isinstance(count_this, dict)\n and \"hash\" in value\n and \"hash\" in count_this\n and \"size\" in value\n and \"size\" in count_this\n and value[\"hash\"] == count_this[\"hash\"]\n ):\n counter += 1\n \"hash\" in value and isinstance(count_this, dict) and \"hash\" in count_this\n return counter",
"def number_keys(a_dictionary):\n return(len(a_dictionary))",
"def count_objects_of_size(self, n: int, **parameters: int) -> int:"
] | [
"0.71943367",
"0.71715474",
"0.69646627",
"0.6960094",
"0.6950782",
"0.6901174",
"0.6860939",
"0.6854527",
"0.68178976",
"0.6807361",
"0.6695624",
"0.6688045",
"0.6622141",
"0.6599806",
"0.6558737",
"0.6528312",
"0.6516975",
"0.64872235",
"0.6447478",
"0.6436087",
"0.641087",
"0.63838947",
"0.6376552",
"0.63462913",
"0.6342291",
"0.6342291",
"0.6333362",
"0.6329662",
"0.6318653",
"0.6316472"
] | 0.7272351 | 0 |
Returns a list formed by the evaluation types present in criteria. | def get_evaluation_analysis_types(self, parameters):
eval_types =[]
for evaluation_criteria_id in parameters["clustering"]["evaluation"]["evaluation_criteria"]:
# for subcriteria in parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id]:
# eval_types.append(subcriteria)
eval_types.extend(parameters["clustering"]["evaluation"]["evaluation_criteria"][evaluation_criteria_id].keys())
return list(set(eval_types)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_query_and_evaluation_analysis_types(self, parameters):\n queries = parameters[\"clustering\"][\"evaluation\"][\"query_types\"]\n queries.extend(AnalysisPopulator.get_evaluation_analysis_types(parameters))\n return list(set(queries))",
"def getResultDefs(self, type=None):\n results = self.results.values()\n\n if type:\n results = filter(lambda result: result.type == type, results)\n\n return results",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def report_type_choices():\n\n rts = report_types()\n rcs = report_categories()\n return [(c, [(rt.report_type, rt.name) for rt in rts if rt.category == c]) for c in rcs]",
"def filter_evaluations_by_type(self, type_):\n from .evaluation import Evaluation\n from .code_component import CodeComponent\n\n joined_eval = join(\n Evaluation.t, CodeComponent.t,\n ((Evaluation.m.trial_id == CodeComponent.m.trial_id) &\n (Evaluation.m.code_component_id == CodeComponent.m.id))\n )\n joined = join(\n Activation.t, joined_eval,\n ((Evaluation.m.trial_id == Activation.m.trial_id) &\n (Evaluation.m.activation_id == Activation.m.id))\n )\n query = (\n select([CodeComponent.m.name, Evaluation.m.repr])\n .select_from(joined)\n .where((Activation.m.trial_id == self.trial_id) &\n (Activation.m.id == self.id) &\n (CodeComponent.m.type == type_))\n )\n for result in relational.session.execute(query):\n yield result",
"def getType(self, terms):\n\n\t\treturn [i for i in xrange(len(self.toTYPE)) if terms in self.toTYPE[i]]",
"def to_criteria(self):\r\n c = []\r\n if self.minmax_criteria is not None:\r\n c.extend(self.minmax_criteria.values())\r\n\r\n return c",
"def statistify(criteria):\n final = []\n for degree in criteria.keys():\n if degree == 'total':\n continue\n for num in range(0,criteria[degree]):\n final.append(int(degree.split('degree')[1]))\n return final",
"def get_evaluators(categories):\n eval_metric_fn_keys = [EVAL_DEFAULT_METRIC]\n evaluators_list = []\n for eval_metric_fn_key in eval_metric_fn_keys:\n evaluators_list.append(\n EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](categories=categories))\n return evaluators_list",
"def get_data_types(self):\n data_types = set()\n for er in self.exercise_recordings:\n for data_type in er.data_types:\n if data_type not in data_types:\n data_types.add(data_type)\n return list(data_types)",
"def get_evaluators(eval_config, categories):\n eval_metric_fn_key = eval_config.metrics_set\n if eval_metric_fn_key not in EVAL_METRICS_CLASS_DICT:\n raise ValueError('Metric not found: {}'.format(eval_metric_fn_key))\n return [\n EVAL_METRICS_CLASS_DICT[eval_metric_fn_key](\n categories=categories)\n ]",
"def _inferred_type_levels(self) -> list[str]:\n return [i.inferred_type for i in self.levels]",
"def get_rules_for_type(type):\n\n rules = get_db().execute('SELECT * FROM ruleset WHERE type=?', (type,)).fetchall()\n\n return rules",
"def getTypes():\n\t\n\ttranslationTable = []\n\tfor x in typePrimitive:\n\t\ttranslationTable.extend(x[0])\n\t\n\tid = 0\n\ttypes = []\n\tmax = 0\n\tfor x in typePrimitive:\n\t\t\n\t\tbinds = []\n\t\tfor y in x[2]:\n\t\t\tbinds.append(translationTable.index(y))\n\t\t\n\t\tif (x[4] != False) and (x[4] > max):\n\t\t\tmax = x[4]\n\t\t\t\n\t\t\n\t\ttypes.append({'name':x[0],'nSites':x[1],'binds':binds,'sym':x[3],'id':id,'max':x[4]})\n\t\tid+=1\n\t\n\treturn (max,types)",
"def types(self) -> list:\n if self._types is None:\n fdist = self.fdist # ranked order\n types_ = list(fdist.type.values)\n self._types = types_\n return self._types",
"def test_for_criteria(self):\n ignore = ['interpreter_method', 'average_by_sample_or_site', 'include_nrm']\n values = ([dic['value'] for dic in self.acceptance_criteria.values() if (dic['criterion_name'] not in ignore and dic['value'] != -999)])\n return values",
"def filter_criteria(self):\n return self.filter_nodes('//Validation/Criteria')",
"def get_types(self) -> List[str]:\n return sorted(list(self._radii.keys()))",
"def load_criterias():\r\n l = [ (p.id, p.name) for p in StockProperty.objects.all() ]\r\n l.insert(0, ('', 'Select to add criteria ...'))\r\n return l",
"def get_criteria(self):\n\n\t\treturn self.__criteria",
"def items(self):\n return self._rules_by_lhs.items()",
"def gettypes(self):\n return [str(self.sd.xlate(t[0])) for t in self.sd.types]",
"def get_check_types():",
"def getCriteriaItems( self ):\n # filter out empty strings\n result = []\n\n value = tuple( filter( None, self.value ) )\n if not value:\n return ()\n result.append( ( self.field, self.value ), )\n\n if self.operator is not None:\n result.append( ( '%s_operator' % self.field, self.operator ) )\n\n return tuple( result )",
"def get_filter_types(verbose=False):\n if verbose:\n pprint(filter_types)\n return filter_types",
"def getTypes(quizScores):\n challenger = 0\n collaborator = 0\n communicator = 0\n contributor = 0\n for x in range(18):\n questionScore = quizScores[x]\n challenger += int(questionScore[0])\n collaborator += int(questionScore[1])\n communicator += int(questionScore[2])\n contributor += int(questionScore[3])\n return [ challenger, collaborator, communicator, contributor]",
"def getProposalTypesVocab(self):\n list = DisplayList()\n # Acquire the types\n types = self.aq_inner.aq_parent.getProposalTypes()\n for type in types:\n list.add(type, type)\n return list",
"def opinion_type_list():\n for type_ in orm.DataFlagOpinionType.select():\n click.echo(type_.name)",
"def __get_condition_types(condition):\n branch_condition = condition.get(\"specification\")\n condition_type, condition_value = branch_condition.split(\".\")\n condition_types = condition_type.split(\" \")\n return condition_types",
"def criterion_type(self) -> str:\n return pulumi.get(self, \"criterion_type\")"
] | [
"0.67036015",
"0.615326",
"0.59587413",
"0.5896585",
"0.5799256",
"0.5777026",
"0.5747461",
"0.5745",
"0.5661408",
"0.5641381",
"0.5631738",
"0.551762",
"0.551464",
"0.5496893",
"0.5494026",
"0.54865164",
"0.54589295",
"0.5457875",
"0.5433231",
"0.54234606",
"0.5407233",
"0.5399143",
"0.53881145",
"0.5371538",
"0.5361982",
"0.53485054",
"0.53307325",
"0.53098816",
"0.5306773",
"0.53030264"
] | 0.76133484 | 0 |
Returns the 'details' field of a clustering. | def analysis_function_details(self,clustering):
return clustering.details | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def get_details(self):\n return self.details",
"def details(self) -> \"dict\":\n return self._attrs.get(\"details\")",
"def details(self):\n return self._details",
"def detail(self):\n info = self.info()\n return info",
"def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }",
"def get_cluster_info(self) -> Dict[str, Any]:\n pass",
"def details(self):\n pass",
"def detailedInfo(cls):\n return 'tbd'",
"def detailedInfo(cls):\n return 'tbd'",
"def detail(self):\n return self.status[\"health\"][\"detail\"]",
"def get_details(self):",
"def getDetailsJSON(self):\n return self.__detailsJSON",
"def detail_cluster(cluster_name, znode):\n\n _cluster_info = dict()\n _cluster_info.update(app.clusters[cluster_name].__dict__)\n _cluster_info.pop(\"auth_data\", None)\n _cluster_info[\"connection\"] = app.managers[cluster_name]._client.state\n resp = Response(json.dumps(_cluster_info),\n status=200,\n mimetype=\"application/json\")\n return resp",
"def get_cluster_entry(self):\n\n cert_data = self.cluster_description.get(\"certificateAuthority\", {}).get(\"data\", \"\")\n endpoint = self.cluster_description.get(\"endpoint\")\n arn = self.cluster_description.get(\"arn\")\n\n return OrderedDict([\n (\"cluster\", OrderedDict([\n (\"certificate-authority-data\", cert_data),\n (\"server\", endpoint)\n ])),\n (\"name\", arn)\n ])",
"def details(self):\n raise NotImplementedError()",
"def cluster_description(self):\n if self._cluster_description is None:\n if self._parsed_globals is None:\n client = self._session.create_client(\"eks\")\n else:\n client = self._session.create_client(\n \"eks\",\n region_name=self._parsed_globals.region,\n endpoint_url=self._parsed_globals.endpoint_url,\n verify=self._parsed_globals.verify_ssl\n )\n full_description = client.describe_cluster(name=self._cluster_name)\n self._cluster_description = full_description[\"cluster\"]\n\n if \"status\" not in self._cluster_description:\n raise EKSClusterError(\"Cluster not found\")\n if self._cluster_description[\"status\"] not in [\"ACTIVE\", \"UPDATING\"]:\n raise EKSClusterError(\"Cluster status is {0}\".format(\n self._cluster_description[\"status\"]\n ))\n\n return self._cluster_description",
"def _get_details(self, details):\n details['DoT'] = \"Yes\" if self.static else \"No\"\n details['device'] = self.device\n details['volume_id'] = self.volume_id\n details['from_snap'] = \"No\" if not self.from_snapshot_id else self.from_snapshot_id\n details['from_archive'] = \"No\" if not self.from_archive else self.from_archive['url']\n details['snapshot_progress'] = self.snapshot_progress\n details['snapshot_status'] = self.snapshot_status\n # TODO: keep track of any errors\n details['err_msg'] = None if details.get('err_msg', '') == '' else details['err_msg']\n details['snapshots_created'] = self.snapshots_created\n return details",
"def __repr__(self):\n return (\n f'GalaxyCluster {self.unique_id}: '\n f'(ra={self.ra}, dec={self.dec}) at z={self.z}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self.galcat)} source galaxies'\n )",
"def details (self):\n return six.text_type(self)",
"def info(self):\n for key, value in self.dataset['info'].items():\n print('{}: {}'.format(key, value))",
"def details(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Mapping[str, pulumi.Input[str]]]]]]:\n return pulumi.get(self, \"details\")",
"def get_discovery_summary():\n pass",
"def get_details(disease):\n\treturn d_desc_map[disease]",
"def get_details(self):\n raise Exception(\"bad details\")",
"def get_details(self):\n return self.__config_data",
"def raw_detail_dicts(self):\n # TODO(chris): validate columns using column headers.\n details = []\n selector = '#ae-instances-details-table tbody tr'\n for element in self.doc.cssselect(selector):\n children = list(element)\n assert len(children) == 9, [child.text for child in children]\n details.append({\n 'instance_id': element.attrib['id'].strip(),\n 'qps': children[0].text.strip(),\n 'latency': children[1].text.strip(),\n 'requests': children[2].text.strip(),\n 'errors': children[3].text.strip(),\n 'age': children[4].text.strip(),\n 'memory': children[5].text.strip()\n })\n return details",
"def __repr__(self):\n s = f'sample:\\n{self.sample}\\n'\n s += f'cluster:\\n{self.cluster}\\n'\n s += f'largest_cluster:\\n{self.get_largest_cluster()}'\n return s",
"def info(self):\n ss = \"\\nSummary EffectiveArea2D info\\n\"\n ss += \"----------------\\n\"\n # Summarise data members\n ss += array_stats_str(self.energy, 'energy')\n ss += array_stats_str(self.offset, 'offset')\n ss += array_stats_str(self.eff_area, 'dispersion')\n\n return ss"
] | [
"0.6636939",
"0.6636939",
"0.6636939",
"0.6543469",
"0.64725894",
"0.63629097",
"0.62274104",
"0.6106484",
"0.60739094",
"0.6035391",
"0.6035391",
"0.60135996",
"0.6000487",
"0.5963064",
"0.59448",
"0.5934605",
"0.5902071",
"0.5895396",
"0.58460885",
"0.5807865",
"0.57522964",
"0.57400453",
"0.5730994",
"0.5702703",
"0.56945956",
"0.5692235",
"0.56831974",
"0.5671745",
"0.5614645",
"0.5606766"
] | 0.7595597 | 0 |
Returns the number of elements that are clustered in this clustering (which may be less than the total number of elements in the dataset if there were noisy elements). | def analysis_function_total_elements(self,clustering):
return clustering.total_number_of_elements | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_clusters(self):\n return len(self.clusters)",
"def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())",
"def count_elements_in_dataset(dataset):\n return dataset.count()",
"def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)",
"def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)",
"def num_elements(self):\n return self.subset.num_elements()",
"def cluster_obs_count(self):\n return(self.merged_data.groupby(\n 'labels').count().transpose().iloc[0, :])",
"def element_size(self):\n vecs = (\n self.nodes[self.elements[:, :4], :][:, 1:, :]\n - self.nodes[self.elements[:, :4], :][:, 0, None, :]\n )\n return np.abs(np.linalg.det(vecs)) / 6",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def n_clusters(self):\n return self.model.n_clusters",
"def nsites(self) -> int:\n return len(self.A)",
"def valency(self):\n return len(self.neighbors())",
"def n_points(self):\n\n if self.data_reduced:\n return len(self.data_reduced[0])\n else:\n return 0",
"def elements_count(self):\n return self.__elements_count",
"def getNNodesTot(self):\n nNodesTot = 0\n for iElt in Elements._all:\n nNodesTot += len(iElt.coord)\n return nNodesTot",
"def count(self):\n\t\treturn len(list(self.nodes))",
"def __len__(self):\n return len(self.dataset) * self.samples_per_pair",
"def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist",
"def __len__(self):\n return len(self.centroid_vector)",
"def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()",
"def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes",
"def n_good_features_(self):\n return np.sum(self.important_features_)",
"def GetNumberOfElements(self, assoc):\n result = 0\n for dataset in self:\n result += dataset.GetNumberOfElements(assoc)\n return int(result)",
"def n_elements(self) -> int:\n n_elem = np.prod(self.shape)\n if self.n_timesteps > 1:\n n_elem = int(n_elem / self.n_timesteps)\n return n_elem",
"def __len__(self):\n return len(np.where(np.logical_not(self.data.mask))[0])",
"def count(self):\n return len(self._elements)",
"def count_nodes(self):\n\t\treturn self.__count_nodes(self)",
"def voxel_count(self):\n return self.cols * self.rows * self.sections",
"def size(self):\n\t\treturn len(self.nodes)",
"def element_count(self):\n return self._internal.get_element_count()"
] | [
"0.7475261",
"0.73660713",
"0.7281601",
"0.7209829",
"0.7184535",
"0.71797055",
"0.71275187",
"0.7005667",
"0.6999668",
"0.69551873",
"0.68624055",
"0.68511283",
"0.67983764",
"0.6792057",
"0.6780994",
"0.6774845",
"0.6771099",
"0.66886616",
"0.66632557",
"0.66320395",
"0.66132414",
"0.6564767",
"0.6555667",
"0.6509118",
"0.6487408",
"0.64816576",
"0.64807916",
"0.6468132",
"0.64678425",
"0.64630216"
] | 0.74841356 | 0 |
Returns the percentage of the clustering's elements that are in the 4 biggest clusters. | def analysis_function_top_4(self,clustering):
clustering.sort_clusters_by_size()
total = 0
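        # sum the population percentages of the 4 most populated clusters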
percents = clustering.get_population_percent_of_n_bigger_clusters(4)
for p in percents:
total = total+p
return total | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist",
"def purity_score(clusters, classes):\n clusters = np.array(clusters)\n classes = np.array(classes)\n A = np.c_[(clusters,classes)]\n\n n_accurate = 0.\n\n for j in np.unique(A[:,0]):\n z = A[A[:,0] == j, 1]\n x = np.argmax(np.bincount(z))\n n_accurate += len(z[z == x])\n\n return n_accurate / A.shape[0]",
"def analysis_function_num_clusters_to_percent(self,clustering,percent):\n return clustering.number_of_clusters_to_get_percent(percent)",
"def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")",
"def _calculate_cluster_measures(\n arr4d,\n threshold,\n bin_struct,\n two_sided_test=False,\n):\n n_regressors = arr4d.shape[3]\n\n max_sizes = np.zeros(n_regressors, int)\n max_masses = np.zeros(n_regressors, float)\n\n for i_regressor in range(n_regressors):\n arr3d = arr4d[..., i_regressor].copy()\n\n if two_sided_test:\n arr3d[np.abs(arr3d) <= threshold] = 0\n else:\n arr3d[arr3d <= threshold] = 0\n\n labeled_arr3d, _ = label(arr3d > 0, bin_struct)\n\n if two_sided_test:\n # Label positive and negative clusters separately\n n_positive_clusters = np.max(labeled_arr3d)\n temp_labeled_arr3d, _ = label(\n arr3d < 0,\n bin_struct,\n )\n temp_labeled_arr3d[temp_labeled_arr3d > 0] += n_positive_clusters\n labeled_arr3d = labeled_arr3d + temp_labeled_arr3d\n del temp_labeled_arr3d\n\n clust_vals, clust_sizes = np.unique(labeled_arr3d, return_counts=True)\n assert clust_vals[0] == 0\n\n clust_vals = clust_vals[1:] # First cluster is zeros in matrix\n clust_sizes = clust_sizes[1:]\n\n # Cluster mass-based inference\n max_mass = 0\n for unique_val in clust_vals:\n ss_vals = np.abs(arr3d[labeled_arr3d == unique_val]) - threshold\n max_mass = np.maximum(max_mass, np.sum(ss_vals))\n\n # Cluster size-based inference\n max_size = 0\n if clust_sizes.size:\n max_size = np.max(clust_sizes)\n\n max_sizes[i_regressor], max_masses[i_regressor] = max_size, max_mass\n\n return max_sizes, max_masses",
"def get_clust_num_perc(model, vis_perc=0.9):\n\tnc = len(np.where(model.allocmodel.Nk > 0)[0])\n\tidx = np.argsort(-model.allocmodel.Nk)[0:nc]\n\n\ttot = model.allocmodel.Nk[idx].sum()\n\tcursum = 0\n\ti = 0\n\twhile cursum < tot*vis_perc:\n\t cursum += model.allocmodel.Nk[idx][i]\n\t i+=1\n\n\treturn i",
"def compute_cluster_class_fractions(k_means_model, y):\n\n n_classes = y.shape[1]\n class_labels = utils.one_hot_to_index(y)\n cluster_labels = k_means_model.labels_\n\n class_clustroid_counts = np.zeros((n_classes, K))\n for i in range(len(class_labels)):\n class_clustroid_counts[class_labels[i], cluster_labels[i]] += 1\n\n class_clustroid_fractions = class_clustroid_counts / np.sum(class_clustroid_counts, axis=1).reshape(n_classes, 1)\n\n print(\"\\n---- Class Clustroid Distribution ----\")\n for i in range(n_classes):\n print(\"Class {}: {}\".format(i, class_clustroid_fractions[i, :]))",
"def calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)",
"def clust_strength(mat,groups):\n cluster_strengths = []\n for group in range(len(np.unique(groups))):\n this_cluster = mat[groups==group,:]\n this_cluster_mean = np.mean(this_cluster,axis=0)\n all_dists = mat - this_cluster_mean\n out_dists = np.linalg.norm(all_dists[groups!=group],axis=1)\n in_dists = np.linalg.norm(all_dists[groups==group],axis=1)\n this_strength = np.mean(out_dists)/np.mean(in_dists)\n cluster_strengths.append(this_strength)\n \n return np.mean(cluster_strengths)",
"def clusterAlgorithm(values):\n clusterMap = dict()\n for value in values:\n if value[2] not in clusterMap.keys():\n clusterMap[value[2]] = []\n clusterMap[value[2]].append(value)\n frequency = [float(len(clusterMap[value[2]])) for value in values]\n total = sum(frequency)\n weightValues = [freq / total for freq in frequency]\n print sum(weightValues)\n lightValues = [value[1] for value in values]\n return np.average(lightValues, weights = weightValues)",
"def get_susceptibility(clusters):\n\n # If there is no or only one cluster then there is no finite cluster\n if len(clusters) <= 1:\n return np.nan\n\n # Remove largest, i.e. infinite, cluster\n clusters.remove(max(clusters))\n\n sizes = np.array(list(set(clusters)))\n n_s = []\n\n for size in sizes:\n n_s.append(clusters.count(size))\n\n temp = sizes * n_s\n S = np.sum(sizes * temp) / np.sum(temp)\n\n return S",
"def clusterSize(l, scheme, clustertype='fluid'):\n clist = findClusters(l, scheme, clustertype)\n \n avglists=[]\n for i in clist:\n avglist=[]\n for l in i:\n avglist.append(np.mean(l))\n avglists.append(np.mean(avglist))\n return avglists",
"def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))",
"def evaluateClusters( features, labels):\r\n\r\n\treturn silhouette_score( features, labels)",
"def compute_total_bipartiteness(hypergraph, clusters):\n bipartiteness_sum = 0\n for i in range(len(clusters)):\n for j in range(i + 1, len(clusters)):\n bipartiteness_sum += hypergraph_bipartiteness(hypergraph, clusters[i], clusters[j])\n return bipartiteness_sum",
"def davies_bouldin_score(self):\r\n print(colored(\"The davies bouldin score of the clustering is %0.002f\\n\" %(davies_bouldin_score(self.X, self.labels)),color = 'red', attrs=['bold']))\r\n print()\r\n print(colored(\"The points in each cluster are : \",color = 'yellow', attrs=['bold']))\r\n print(collections.Counter(self.labels))",
"def get_optimal_clusters(cell,threshold=140):\n\n\t#\tTurn image to numpy array\n\tpic = image_to_matrix(cell)\n\n\t#\tGet the array of coordinates of dark dots\n\tdots = get_threshold_dots(pic,threshold)\n\n\tscores = []\n\n\tfor n_clusters in range(1,10):\n\t\tclusters = kmeans.kmeans(pic,pic.shape[0],pic.shape[1],50,n_clusters,threshold)\n\t\tprint clusters\n\n\t\tsquare_sum_array = [0]*n_clusters\n\t\tcount_array = [0]*n_clusters\n\n\t\tfor dot in dots:\n\t\t\tdistance_array = [kmeans.euclid_distance(dot,cluster) for cluster in clusters]\n\t\t\tmin_index = distance_array.index(min(distance_array))\n\t\t\tsquare_sum_array[min_index] += kmeans.euclid_distance(clusters[min_index],dot)\n\t\t\tcount_array[min_index] += 1\n\n\t\tvariances = [square_sum/(count+0.001) for square_sum, count in zip(square_sum_array,count_array)]\n\n\t\tprint variances\n\t\tscores.append(sum(variances)/len(variances))\n\n\treturn scores",
"def diversion_score(X, offspring_list):\r\n similarity_sum = 0\r\n if len(offspring_list[0]) == 2:\r\n offspring_list = [(parent_a, offspring, parent_a) for (parent_a, offspring) in offspring_list]\r\n for (parent_a, offspring, parent_b) in offspring_list:\r\n similarity_sum += max(icc(parent_a, offspring), icc(parent_b, offspring))\r\n return (1 - (((similarity_sum / len(offspring_list)) + 1) / 2)) * 100 # move from [-1,1] to [0,2], then to [0,1], then inverse, finally move to [0,100]\r",
"def ds_ratio(group):\n nix_count = (group=='nix').sum()\n top_count = (group=='top').sum()\n ratio = nix_count/(nix_count+top_count) #could smooth this\n return ratio",
"def percenter(rank, max_rank):\n\treturn 100 * (rank/(max_rank or 1))",
"def analysis_function_top_percent(self,clustering):\n clustering.sort_clusters_by_size()\n return clustering.get_population_percent_of_cluster(0)",
"def calculate_priors(trainingLabels):\r\n sum = 0\r\n priors = {}\r\n totalSamples = len(trainingLabels)\r\n classes = set(trainingLabels)\r\n for cls in classes:\r\n numCls = len(filter(lambda x: x == cls, trainingLabels))\r\n sum += numCls\r\n priors[cls] = float(numCls) / float(totalSamples)\r\n \r\n # Sanity check: valid partitioning\r\n assert(sum == totalSamples)\r\n\r\n return priors",
"def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)",
"def get_cluster_to_split(clusters):\n\treturn max(clusters.items(), key=lambda x: x[1].get_distortion())[1]",
"def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)",
"def contains_percentage_of(self, other: 'Interval') -> float:\n if other.length == 0:\n return other.a in self\n intersection = Interval.intersection([self, other])\n return intersection.length / other.length if intersection else 0.0",
"def cluster_mcc_ratio(result, cluster_names, var, n=5):\n rel1=get_cluster_country_distr(result, var)\n clusters=calculate_cluster_size(result, var)\n hours=hours_tusc(result, var)\n res=\"\"\n for i in zip(clusters.index, cluster_names[:len(clusters)]):\n res=res+f\"By the number of unique visitors the {i[1]} cluster's top 5 countries are; \"\n rel=rel1.sort_values(i[0],ascending=False)[:n]\n for j in range(0,5):\n if j!=n-1:\n res=res+f'{rel[i[0]].index[j]} ({rel[i[0]][j]}%), '\n else:\n res=res+f'and {rel[i[0]].index[j]} ({rel[i[0]][j]}%). '\n res=res+f'This cluster spends on average {int(hours.hrs_in_tusc[i[0]])} days in Tuscany, '\n res=res+get_places_at_least4_hours(result, i[0], var)\n res=res+ cluster_airport_result(result, i[0], var)\n return res",
"def compute_clusters(self, p: float):\n pass"
] | [
"0.68239075",
"0.66668284",
"0.6481002",
"0.6474694",
"0.6268896",
"0.6198322",
"0.61978954",
"0.6153407",
"0.6145863",
"0.61173046",
"0.6088561",
"0.6067123",
"0.60514313",
"0.605035",
"0.60220045",
"0.6007003",
"0.59786797",
"0.5954374",
"0.5944898",
"0.591898",
"0.5904012",
"0.58934736",
"0.5888239",
"0.58542675",
"0.58466184",
"0.58107376",
"0.57912296",
"0.5785383",
"0.5767912",
"0.57631725"
] | 0.7220413 | 0 |
Returns the percent of noise elements in the dataset. | def analysis_function_noise_level(self, clustering, total_elements):
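        # noise level = 100 minus the percentage of dataset elements that were assigned to a cluster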
return 100.-(clustering.total_number_of_elements/float(total_elements))*100. | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_noise_level(self, data):\n noise = max(data)\n noise_min = 2600\n noise_max = 4095\n ratio = (noise - noise_min)/(noise_max - noise_min)\n return int(ratio*100)",
"def getNoiseVar(img,fraction=0.95):\n last_val = np.percentile(img,fraction)\n #si(img<last_val,title=\"Pixel values considered as noise\")\n return np.var(img[img<last_val])",
"def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals",
"def calculate_sample_silhouette(self):\n sum_samples = 0\n for cluster in self.cluster_lst:\n sum_samples += self.sum_silhouette(cluster)\n sample_size = len(self.samples)\n return sum_samples/sample_size",
"def noise_level(data):\n length=len(data) - 2\n dev=[]\n for i in range(1,length - 1):\n dev.append((abs(data[i] - data[i-1]) + abs(data[i] - data[i + 1]))/2)\n dev.sort()\n return dev[round(0.9*length)]",
"def noise(self, freq: int, /) -> None:",
"def mdape(self) -> float:\n return float(np.median(np.abs(self._percentage_error())) * 100)",
"def density(self):\r\n return self.count_ones() / float(self.xspan * self.yspan)",
"def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)",
"def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals",
"def mean(self):\r\n\t\treturn sum(self.sample)/len(self.sample)",
"def var(x):\n length = len(x)\n\n if length == 0:\n return None\n result = 0.0\n m = TinyStatistician.mean(x)\n for i in x:\n result += (i - m) ** 2\n\n return result / length",
"def prob1(n):\n#raise NotImplementedError(\"Problem 1 Incomplete\")\n if n == 0 :\n raise ValueError(\"Sampling 0 points is not defined.\")\n total = 0\n for i in xrange(n) :\n if np.random.normal() > 3 :\n total += 1\n return float(total)/n",
"def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)",
"def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num / den, 2)",
"def get_percent_wet():\n # Create an ADS1115 ADC (16-bit) instance.\n adc = Adafruit_ADS1x15.ADS1115()\n\n GAIN = 1\n DRY = 20280 # 100% Dry\n WET = 10140 # 100% Wet\n\n value = adc.read_adc(0, gain=GAIN)\n \n # print \"value: %d\" % value\n \n percent_dry = ((value - WET)*100)/(DRY-WET)\n percent_wet = 100 - percent_dry\n\n return percent_wet",
"def get_percentage_missing(series):\n num = series.isnull().sum()\n den = len(series)\n return round(num/den*100, 2)",
"def noise(self):\n return self._noise",
"def sampling_ratio(self):\n return self.coincidences / self.n",
"def get_percent(self, n):\n controlled = 0.00\n for i in range(len(self.tile_contents)):\n if(self.tile_contents[i].player_number == n):\n controlled += 1.00\n \n return float(controlled / self.paint_blocks)",
"def noiseFraction(truth_i3, measured_i3, tolerance):\n if (measured_i3.getNumberMolecules() == 0):\n return [0, truth_i3.getNumberMolecules()]\n \n noise_locs = 0\n total_locs = 0\n for i in range(truth_i3.getNumberFrames()):\n t_locs = truth_i3.getMoleculesInFrame(i+1)\n m_locs = measured_i3.getMoleculesInFrame(i+1, good_only = False)\n \n dist = utilC.peakToPeakDist(m_locs['xc'], m_locs['yc'], t_locs['xc'], t_locs['yc'])\n\n noise_locs += numpy.count_nonzero((dist > tolerance))\n total_locs += dist.size\n\n return [noise_locs, total_locs]",
"def prob1(n):\n\n # create a giant draw from a normal distribution\n random_draws = np.random.normal(loc= 0, scale = 1, size = n)\n\n # mask the values\n mask = random_draws > 3\n\n return np.sum(mask)/float(n)",
"def _ion_densities_datafiles(self):\n ne = self.ne_in\n nD = self.ni_in[0,:]\n nC = (ne-nD)/6.\n print(\"nC/nD: \"+str(np.mean(nC/nD)*100.)+\" %\")\n self.ni_in[0,:] = nD\n self.ni_in[1,:] = nC",
"def get_estimated_noise(self):\n return self.gp_core.noise_var",
"def add_noise(self, noise):\n if noise > 0.0:\n for key in self.counts:\n self.counts[key] *= 1.0 + noise * np.random.random_sample()",
"def smape(self) -> float:\n _temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))\n return float(100 / len(self.true) * _temp)",
"def percentages(self) -> pandas.Series:\n if self._percentages is None:\n scalar = 1 if self.use_fraction else 100\n self._percentages = scalar * self.counts/self.total\n return self._percentages",
"def percent_passing(self) -> float:\n num_meas = Enumerable(self.mlc_meas).select_many(lambda m: m.passed).count()\n num_pass = (\n Enumerable(self.mlc_meas)\n .select_many(lambda m: m.passed)\n .count(lambda p: bool(p) is True)\n )\n return float(100 * num_pass / num_meas)",
"def density(self, x):\n\t\tN = len(self.train_data)\n\t\tpoints = list(self.train_data)\n\t\tdists = [np.linalg.norm(x-point)**2 for point in points]\n\t\texps = [np.exp(-dist / (2 * (self.bandwidth ** 2))) for dist in dists]\n\t\tunnormalized_sum = sum(exps)\n\t\tprobability = (1 / N) * self.normalizing_constant() * unnormalized_sum\n\t\treturn probability",
"def prob(self, w):\n return self.counts[w] / self.total_count"
] | [
"0.68183047",
"0.67847115",
"0.6347133",
"0.62229025",
"0.61716366",
"0.61085075",
"0.6024723",
"0.59983605",
"0.59494674",
"0.5946805",
"0.5945206",
"0.5942025",
"0.5883616",
"0.58745587",
"0.58745587",
"0.5860042",
"0.5856353",
"0.5841199",
"0.5834648",
"0.57891965",
"0.5787481",
"0.5779676",
"0.5754733",
"0.5739616",
"0.5738536",
"0.5734329",
"0.5733115",
"0.57128024",
"0.5706441",
"0.5677128"
] | 0.70178664 | 0 |
Returns the mean cluster size. | def analysis_function_mean_cluster_size(self,clustering):
sizes = get_cluster_sizes(clustering.clusters)[1]
return numpy.mean(sizes) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points",
"def meanContigLength(self):\n\t\tstats = self.scores()\n\t\treturn stats['meanContig']",
"def avgsize_c(self):\n return self._avgsize_c",
"def clusterSize(l, scheme, clustertype='fluid'):\n clist = findClusters(l, scheme, clustertype)\n \n avglists=[]\n for i in clist:\n avglist=[]\n for l in i:\n avglist.append(np.mean(l))\n avglists.append(np.mean(avglist))\n return avglists",
"def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)",
"def mean(self):\n mean = sum(self.data)/self.size\n return mean",
"def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes",
"def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())",
"def avg_sil_width(cluster_vec, centroid):\n tot_dis = 0\n for i in xrange(len(cluster_vec)):\n cos_dis = cos_sim(cluster_vec[i], centroid)\n tot_dis = tot_dis + cos_dis\n return tot_dis/len(cluster_vec)",
"def meshsize_avg(self):\n nspans = self.numspans\n support = abs(self.kv[-1] - self.kv[0])\n return support / nspans",
"def average_consensus(self, cluster):\n\t\tcenterk = 0\n\t\tindex = 0\n\t\tfor value in cluster:\n\t\t\tcenterk += value\n\t\t\tindex += 1\n\t\tcenterk = centerk / index\n\t\treturn centerk",
"def average_size(self):\n sizes = []\n for i in range(self.params.num_trees):\n with tf.device(self.device_assigner.get_device(i)):\n sizes.append(self.trees[i].size())\n return tf.reduce_mean(tf.pack(sizes))",
"def n_clusters(self):\n return len(self.clusters)",
"def graph_data_size_avg(self) -> float:\n return float(self.graph_tuple_stats.graph_data_size_avg or 0)",
"def group_size_dist(self):\n return self.group_sizes() / self.group_sizes().sum()",
"def avg_net(self) -> float:\n return torch.mean(self.units.net)",
"def average_city_size(self):\r\n average = 0\r\n total = 0\r\n for code, node in self.vertices.items():\r\n average += node.population\r\n total += 1\r\n return average // total",
"def ensemble_mean(self):\n return self.mean(dim='mem')",
"def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)",
"def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements",
"def n_clusters(self):\n return self.model.n_clusters",
"def getCentroid(cluster):\n try:\n return np.mean(cluster, axis = 0)\n except:\n return None",
"def get_cluster_average(cls, indices, dist_mat):\n distances = cls.get_all_distances(indices, dist_mat)\n return np.mean(distances)",
"def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum",
"def avgcpu(self):\n return (self._total_cpu['value'] / self._total_cpu['count']) if self._total_cpu['count'] else 0",
"def extract_cluster_size(line):\r\n cluster_size = line.split(\":\")[-1]\r\n\r\n try:\r\n cluster_size = int(cluster_size)\r\n except ValueError:\r\n return 0\r\n return cluster_size",
"def get_clust_cent(self):\r\n\r\n return self.__clust_cent",
"def cf_mean(self):\n return self['capacity_factor'] / 100",
"def cf_mean(self):\n return self['capacity_factor'] / 100",
"def mean(self):\n return self._mean"
] | [
"0.7243587",
"0.7179832",
"0.7015998",
"0.7009933",
"0.68922645",
"0.68876517",
"0.684724",
"0.6808377",
"0.669405",
"0.6671428",
"0.6667033",
"0.6651806",
"0.65882844",
"0.6482646",
"0.63869536",
"0.63827366",
"0.63742137",
"0.63045746",
"0.6303817",
"0.62123394",
"0.61937535",
"0.61916125",
"0.6186689",
"0.6182542",
"0.618072",
"0.61401653",
"0.6137394",
"0.6104162",
"0.6104162",
"0.6092552"
] | 0.8215371 | 0 |
This method creates a project in Pivotal Tracker. | def create_project():
client = RequestManager()
project_name = "".join(choices(string.ascii_letters + string.digits, k=10))
client.set_method("POST")
client.set_endpoint("/projects")
body = {"name": project_name}
client.set_body(json.dumps(body))
response = client.execute_request()
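    # keep the new project's id so later requests can reference it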
STORED_ID['project_id'] = response.json()['id'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def project_create(project):\n client.project.create(project)",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def test_create_project(self):\n pass",
"def create_project(self, **kwargs):\n save = kwargs.get('save', True) \n if kwargs.has_key('save'):\n del(kwargs['save'])\n\n index = self.object_index()\n defaults = dict(slug = \"test-project-%s\" % index,\n basecamp_url = \"https://foo.basecamphq.com/projects/%s/log\" % index)\n defaults.update(kwargs)\n p = Project(**defaults)\n\n if save:\n p.save()\n self.assert_(p.id)\n return p",
"def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")",
"def createProject(self, payLoad):\n\n uri = \"/v1/projects/\" \n response = self.client.post(uri, payLoad)\n return response",
"def test_create_project_request(self):\n pass",
"def ktrack_project(ktrack_instance):\n project = ktrack_instance.create(\"project\", {\"name\": \"My_Test_Project\"})\n return project",
"def project_created_handler(event):\n obj = event.obj\n # submit Project after creation\n obj.workflow.start()",
"def create_project_info(data):\n\t\n\tproject = ProjectInfo()\n\tproject.name = data['name']\n\tproject.description = data['description']\n\tproject.start_date = data['start_date']\n\tproject.end_date = data['end_date']\n\tproject.save()\n\tprint ('Inserted')\n\treturn True",
"def test_new_project_existing(self):\n\n project = fake_clients.FakeProject(name=\"test_project\")\n\n setup_identity_cache(projects=[project])\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"test@example.com\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(response.json(), {\"errors\": [\"actions invalid\"]})",
"def test_project_creation(self):\n title = 'Project title'\n code = 'SCW-12345'\n project = self.create_project(\n title=title,\n code=code,\n institution=self.institution,\n tech_lead=self.project_owner,\n category=self.category,\n funding_source=self.funding_source,\n )\n self.assertTrue(isinstance(project, Project))\n self.assertEqual(project.__str__(), code + ' - ' + title)\n self.assertEqual(project.status, Project.AWAITING_APPROVAL)\n self.assertEqual(project.title, title)\n self.assertEqual(project.code, code)\n self.assertTrue(project.awaiting_approval())",
"def test_create_project(self):\n yield self.nodes[0].overlay.create_project(\"test\", \"specpointer\", \"01-02-03\", 300, \"EUR\", 5)\n yield self.deliver_messages()\n\n # Node 2 should know about this app request now\n projects = self.nodes[1].overlay.persistence.get_projects()\n self.assertTrue(projects)\n self.assertEqual(projects[0]['id'], 1)",
"def post_project_create(self, resource_dict):\n pass",
"def test_projects_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def _create_project(org, project_name):\n project = Project(\n org=org,\n name=project_name\n )\n project.save()\n return project",
"def create_project(projectname):\n auth_id = request.get_json().get(\"auth_id\")\n storage_accesses = request.get_json().get(\"storage_accesses\", [])\n response = jsonify(\n admin.create_project(\n current_app.scoped_session(), projectname, auth_id, storage_accesses\n )\n )\n return response",
"def test_add_project(self):\n pass",
"def create_project(self, pool, project, arg):\n self.verify_pool(pool)\n svc = self.project_path % (pool, project)\n ret = self.rclient.get(svc)\n if ret.status != restclient.Status.OK:\n svc = self.projects_path % pool\n ret = self.rclient.post(svc, arg)\n if ret.status != restclient.Status.CREATED:\n exception_msg = (_('Error creating project: '\n '%(project)s on '\n 'pool: %(pool)s '\n 'return code: %(ret.status)d '\n 'message: %(ret.data)s.')\n % {'project': project,\n 'pool': pool,\n 'ret.status': ret.status,\n 'ret.data': ret.data})\n raise exception.ShareBackendException(msg=exception_msg)",
"def create_new_project(project_name, token=None):\n session = konfuzio_session(token)\n url = create_new_project_url()\n new_project_data = {\"name\": project_name}\n r = session.post(url=url, json=new_project_data)\n return r",
"def create_project(self, **kwargs):\n _url = f\"{self.base_url}/projects\"\n if \"name\" not in kwargs:\n raise ValueError(\"Parameter 'name' is mandatory\")\n return self.http_call(\"post\", _url, json_data=kwargs).json()",
"def test_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"admin,member\",\n \"username\": \"test@example.com\",\n \"user_id\": \"test_user_id\",\n \"authenticated\": True,\n }\n new_task = Task.objects.all()[0]\n url = \"/v1/tasks/\" + new_task.uuid\n response = self.client.post(\n url, {\"approved\": True}, format=\"json\", headers=headers\n )\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"created token\"]})\n\n new_project = fake_clients.identity_cache[\"new_projects\"][0]\n self.assertEqual(new_project.name, \"test_project\")\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n data = {\"password\": \"testpassword\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)",
"def test_create_project(client, session, tokens):\n response = client.post(\n \"/projects\",\n json={\n \"name\": \"New Project\",\n \"organizations\": [],\n \"teams\": [],\n \"users\": [],\n },\n headers={\"Authorization\": f\"Bearer {tokens['write']}\"},\n )\n assert response.status_code == 201\n project_id = response.json[\"id\"]\n assert Project.query.filter(Project.id == project_id).count() == 1",
"def test_create_project_target_enabled(self):\n self.assertEqual(Project.objects.count(), 2)\n url = reverse('projectroles:api_project_create')\n post_data = {\n 'title': NEW_PROJECT_TITLE,\n 'type': PROJECT_TYPE_PROJECT,\n 'parent': str(self.category.sodar_uuid),\n 'description': 'description',\n 'readme': 'readme',\n 'public_guest_access': False,\n 'owner': str(self.user.sodar_uuid),\n }\n response = self.request_knox(url, method='POST', data=post_data)\n self.assertEqual(response.status_code, 201, msg=response.content)\n self.assertEqual(Project.objects.count(), 3)",
"def create(self):\n \n # create the sequence structure by calling the self.project.create\n self.project.create()",
"def create(self, token: Any):\n params = [token, ]\n method = \"ProjectAPI.Create\"\n self.__add_request(method, params, lambda payload: Definition.from_json(payload))",
"def post(self, data):\n conn = pecan.request.db_conn\n try:\n project = db_models.Project(**data.as_dict())\n return conn.create_project(request.context, project)\n except Exception:\n LOG.exception('Fail to create project: %s' % data.as_dict())\n raise exception.ProjectCreateFailed(project_id=data.project_id,\n user_id=data.user_id)",
"def test_duplicate_tasks_new_project(self):\n\n setup_identity_cache()\n\n url = \"/v1/actions/CreateProjectAndUser\"\n data = {\"project_name\": \"test_project\", \"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n\n data = {\"project_name\": \"test_project_2\", \"email\": \"test@example.com\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)",
"def test_projects_id_post(self):\n project = Project()\n response = self.client.open('/project-tracker/projects/{id}'.format(id=3.4),\n method='POST',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))"
] | [
"0.7641362",
"0.74125963",
"0.74125963",
"0.74125963",
"0.73800826",
"0.7369805",
"0.72611123",
"0.7240642",
"0.72367054",
"0.7189181",
"0.71309054",
"0.70826024",
"0.70679694",
"0.7061639",
"0.6987262",
"0.698142",
"0.696992",
"0.6924102",
"0.69188553",
"0.6910896",
"0.6910389",
"0.6896292",
"0.6886656",
"0.6839304",
"0.6798925",
"0.67972517",
"0.67882097",
"0.67828375",
"0.67761225",
"0.67730415"
] | 0.75777197 | 1 |
Static method to delete all projects. | def delete_all_projects():
client = RequestManager()
client.set_method("GET")
client.set_endpoint("/projects")
response = client.execute_request()
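    # delete every project returned; log and skip any malformed entries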
for project in response.json():
try:
ProjectHelper.delete_project(project["id"])
except TypeError:
LOGGER.info(project) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self):\n for project in Project.objects:\n project.delete()",
"def __remove_all_projects__():\n p = subprocess.Popen('rm -rf {}/.wcscanner/*'.format(context.__BASE_PATH__), shell=True)\n p.wait()",
"def clean_project(self, app_name=None, delete_all=False):\n\n if not app_name and not delete_all:\n ConuException(\"You need to specify either app_name or set delete_all=True\")\n\n if delete_all:\n args = [\"--all\"]\n logger.info('Deleting all objects in current project')\n else:\n args = \"-l app=%s\" % app_name\n logger.info('Deleting all objects with label app=%s', app_name)\n\n try:\n o = run_cmd(self._oc_command([\"delete\", \"all\", args]),\n return_output=True)\n o_lines = o.split('\\n')\n for line in o_lines:\n logger.info(line)\n except subprocess.CalledProcessError as ex:\n raise ConuException(\"Cleanup failed because of exception: %s\" % ex)",
"def clear_all(delete_id):\n Tasks.query.filter(Tasks.project_id == delete_id).delete()\n Projects.query.filter(Projects.project_id == delete_id).delete()\n db.session.commit()\n\n return redirect('/')",
"def clean(self):\n\n if not self.__projects:\n return\n\n Console.info(\"Cleaning session...\")\n Console.indent()\n\n for project in self.__projects:\n project.clean()\n\n path = os.path.abspath(os.path.join(\".jasy\", \"locale\"))\n if os.path.exists(path):\n Console.info(\"Cleaning up locale project...\")\n shutil.rmtree(path)\n\n path = os.path.abspath(os.path.join(\".jasy\", \"virtual\"))\n if os.path.exists(path):\n Console.info(\"Cleaning up virtual project...\")\n shutil.rmtree(path)\n\n Console.outdent()",
"def delete_project(arn=None):\n pass",
"def delete(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}\"\n\n self.connector.http_call(\"delete\", _url)\n\n self.project_id = None\n self.name = None",
"def test_delete_project(self):\n pass",
"def test_delete_project(self):\n pass",
"def project_delete(cursor, project):\n haystack = (project['_id'], )\n\n query = \"DELETE FROM projects WHERE _id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n\n query = \"DELETE FROM namespaces WHERE project_id=?\"\n try:\n cursor.execute(query, haystack)\n except Exception as e:\n on_error(e)\n else:\n cursor.connection.commit()\n raise Return((True, None))",
"def tearDown(self):\n Project.objects.all().delete()",
"def delete_stored_project():\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(STORED_ID['project_id']))\n client.execute_request()",
"def delete_orphan_project(apps, schema_editor):\n Project = apps.get_model('data_manager', 'Project')\n Project.objects.filter(dataset__isnull=True).delete()\n return",
"def tearDownClass(cls):\n projects = ['arc_project_for_testing_delete_after_usage1', 'arc_project_for_testing_delete_after_usage2',\n 'ar c', 'ar:c', 'ar<c', 'ar%c']\n for project in projects:\n project_directory = os.path.join(arc_path, 'Projects', project)\n shutil.rmtree(project_directory)",
"def test_projects_delete(self):\n project = Project()\n response = self.client.open('/project-tracker/projects',\n method='DELETE',\n data=json.dumps(project),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))",
"def test_delete_project(self):\n self.assertEqual(Project.objects.count(), 1)\n self.assertEqual(Group.objects.count(), 2)\n\n delete_project(Project.objects.get(name=\"project A\"))\n\n self.assertEqual(Project.objects.count(), 0)\n self.assertEqual(Group.objects.count(), 0)",
"def es_delete(project=None):\n if project is not None:\n script_indexer.delete_project(project)\n else:\n script_indexer.delete_all()",
"def do_project_delete(cs, args):\n key = args.project\n if cs.projects.is_id(key):\n id = key\n else:\n id = cs.projects.get_id_by_name(key)\n cs.projects.delete(id)\n print(\"Delete Project '%s' successfully.\" % key)",
"def delete_project(project):\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.delete_project(project)\n if ret[constants.STATUS_CODE_KEY] == 200:\n click.echo(\"Success\")\n else:\n click.echo(ret[constants.MESSAGE_KEY])",
"def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)",
"def delete(self, oid):\n path = '/projects/%s' % oid\n res = self.client.call(path, 'DELETE', data='', token=self.manager.identity.token)\n self.logger.debug('Delete openstack project: %s' % truncate(res))\n return True",
"def delete_project(\n name\n):\n\n cmd = dict()\n cmd[\"type_\"] = \"delete_project\"\n cmd[\"name_\"] = name\n\n comm.send(cmd)",
"def delete_project(projectname):\n response = jsonify(admin.delete_project(current_app.scoped_session(), projectname))\n return response",
"def delete_project(project_id):\n client = RequestManager()\n client.set_method(\"DELETE\")\n client.set_endpoint(\"/projects/{0}\".format(project_id))\n client.execute_request()",
"def delete_all(self):\n raise NotImplementedError()",
"def delete(conn, project):\n with conn:\n c = conn.cursor()\n c.execute(\"DELETE FROM projects WHERE project =?\", (project,))",
"def remove_single_project(project_name):\n p = subprocess.Popen('rm -rf {}/{}'.format(context.__PROJECTS_PATH__, project_name), shell=True)\n p.wait()",
"def destroy(config, args):\n log = logging.getLogger('kraftwerk.destroy')\n if confirm(\"Remove project %s from node %s along with all services and data?\" % \n (args.project.name, args.node.hostname)):\n args.node.ssh(config.template(\"scripts/project_destroy.sh\", project=args.project))\n print \"Project %s removed from node %s\" % \\\n (args.project.name, args.node.hostname )\n for service in args.project.services(args.node):\n args.node.ssh(service.destroy_script)",
"def delete_project_files(self, project, logStat):\n from corrdb.common.models import FileModel\n from corrdb.common.models import EnvironmentModel\n\n for _file in project.resources:\n file_ = FileModel.objects.with_id(_file)\n if file_:\n result = self.storage_delete_file(file_.group, file_.storage)\n if result:\n logStat(deleted=True, file_obj=file_)\n file_.delete()\n\n for record in project.records:\n result = self.delete_record_files(record, logStat)\n if result:\n logStat(deleted=True, record=record)\n record.delete()\n\n for environment_id in project.history:\n _environment = EnvironmentModel.objects.with_id(environment_id)\n if _environment:\n if _environment.bundle and _environment.bundle.scope == \"local\":\n result = self.storage_delete_file('bundle', _environment.bundle.storage)\n if result:\n # logStat(deleted=True, bundle=_environment.bundle)\n # logStat(deleted=True, environment=_environment)\n _environment.bundle.delete()\n # else:\n # logStat(deleted=True, environment=_environment)\n _environment.delete()",
"def delete_all(self, prog:progress=None): \n\t\tself.__output_status('Delete all files')\n\t\tif (self.__check_terminated()):\n\t\t\treturn;\t\n\t\tdelete_dir(self.root)\n\t\ttime.sleep(0.3)"
] | [
"0.7438387",
"0.74334717",
"0.7418019",
"0.6865698",
"0.6836307",
"0.6812033",
"0.6775198",
"0.6739514",
"0.6739514",
"0.6619207",
"0.6606535",
"0.65799767",
"0.65798426",
"0.65713173",
"0.6563861",
"0.6534334",
"0.64856863",
"0.6476536",
"0.6473611",
"0.6437883",
"0.6420292",
"0.6313049",
"0.62967867",
"0.62856716",
"0.62752104",
"0.6199454",
"0.6188569",
"0.61832356",
"0.6182946",
"0.61698896"
] | 0.8597782 | 0 |
Decorator that returns a 403 status if the user isn't logged in, instead of redirecting to the LOGIN_URL. | def login_required_403(view):
@wraps(view)
def dec_view(request, *args, **kwargs):
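        # unauthenticated callers get a JSON 403 response instead of a redirect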
if not request.user.is_authenticated():
return JsonResponse({"detail": "You have to log in"}, status=403)
return view(request, *args, **kwargs)
return dec_view | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def login_required(f):\n @functools.wraps(f)\n def wrap(*args, **kwargs):\n if not user_session.is_auth:\n raise Forbidden()\n return f(*args, **kwargs)\n return wrap",
"def not_authenticated(func):\n def decorated(request, *args, **kwargs):\n if request.user.is_authenticated():\n return HttpResponseRedirect(get_next_url(request))\n return func(request, *args, **kwargs)\n return decorated",
"def login_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif not users.get_current_user():\n\t\t\treturn redirect(users.create_login_url(request.url))\n\t\treturn func(*args, **kwargs)\n\treturn decorated_view",
"def require_login(func):\n @wraps(func)\n def wrapper(*args, **kwrds):\n try:\n user = get_user()\n if user:\n setattr(g, 'user', user)\n return func(*args, **kwrds)\n else:\n url = url_for('auth.login', redir=request.url)\n return redirect(url)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n log = app_logger()\n log.warning(\"Unexpected error: %s\", exc_value)\n log.error(''.join(traceback.format_exception(\n exc_type, exc_value, exc_traceback\n )))\n return abort(500)\n\n return wrapper",
"def not_logged_in(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'idToken' in session:\n return redirect(url_for('index'))\n else:\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'user_id' not in login_session:\n flash('User not allowed to access')\n return redirect('/login')\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n current = users.get_current_user()\n if not current:\n return redirect(users.create_login_url(request.url))\n elif current.email() == 'akshayubhat@gmail.com':\n return func(*args, **kwargs)\n else:\n return redirect(users.create_logout_url(request.url))\n return decorated_view",
"def login_required(func):\n @wraps(func)\n def f(*args, **kwargs):\n if g.user is None:\n app.logger.info('redirecting not logged in user')\n return redirect(url_for('index'))\n elif not g.user.initialized and f.__name__ not in ['profile_create','logout']:\n return redirect(url_for('profile_create'))\n else:\n return func(*args, **kwargs)\n return f",
"def user_login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not current_user.is_authenticated:\n # Flash warning that user login is required\n flash(\"Logged in user only.\", category=\"error\")\n # Return redirect to login\n return redirect(url_for('main.login'))\n if current_user.banned:\n # Log user out so they can't access their account\n logout_user()\n # Flash warning that user has been banned\n flash(\"You have been banned, please contact an admin.\",\n category=\"error\")\n # Return redirect to index\n return redirect(url_for('main.index'))\n return f(*args, **kwargs)\n return decorated_function",
"def unauthorized_handler(self):\n return flask.redirect(\"/login\")",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"index\"))\n return f(*args, **kwargs)\n return decorated_function",
"def require_logged_in():\n def handler(f, *args, **kwargs):\n if args[0].current_user is not None:\n return f(*args, **kwargs)\n else:\n raise HTTPFound(args[0].route_url('user.login', _query={'redirect': encode_route(args[0])}))\n return decorator(handler)",
"def login_required(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if g.user is None:\n flash('You have to log in first')\n return redirect(url_for('authentication.login', next=url_for(request.endpoint)))\n return func(*args, **kwargs)\n return wrapper",
"def admin_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if users.get_current_user():\n if not users.is_current_user_admin():\n abort(401) # Unauthorized\n return func(*args, **kwargs)\n return redirect(users.create_login_url(request.url))\n return decorated_view",
"def login_required(f):\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if g.USER is None:\n return redirect(\n url_for(\"home\", force_login=True, next=request.url)\n )\n\n return f(*args, **kwargs)\n\n return decorated_function",
"def login_required(func):\n @wraps(func)\n def decorated_view(*args, **kwargs):\n if current_app.login_manager._login_disabled:\n return func(*args, **kwargs)\n elif not current_user.is_authenticated and not current_user.is_active:\n return current_app.login_manager.unauthorized()\n return func(*args, **kwargs)\n\n return decorated_view",
"def login_required(func):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n if 'username' not in login_session:\n return redirect('login')\n else:\n return func(*args, **kwargs)\n return wrapper",
"def anonymous_required(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if g.user:\n flash('You have been already logged in')\n return redirect(url_for('view.index'))\n return func(*args, **kwargs)\n return wrapper",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n if session['user_id']:\n pass\n except KeyError:\n return redirect(url_for('users.login', next=request.url))\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(function):\n\n @wraps(function)\n def wrapper(self, *args, **kw):\n \"\"\"Redirect to main if user doesn't logged in. \"\"\"\n\n if not self.user:\n return self.redirect('/blog/login')\n return function(self, *args, **kw)\n return wrapper",
"def login_required(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n if not session.get('logged_in', False):\n abort(401)\n return func(*args, **kwargs)\n return wrapped",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n # checks is user login\n if session.get(\"user_id\") is None:\n return redirect(url_for(\"login\", next=request.url))\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if 'username' in login_session:\n return f(*args, **kwargs)\n else:\n flash(\"You are not allowed to access there\")\n return redirect('/login')\n return decorated_function",
"def admin_required(func):\n\t@wraps(func)\n\tdef decorated_view(*args, **kwargs):\n\t\tif users.get_current_user():\n\t\t\tif not users.is_current_user_admin():\n\t\t\t\tabort(401) # Unauthorized\n\t\t\treturn func(*args, **kwargs)\n\t\treturn redirect(users.create_login_url(request.url))\n\treturn decorated_view",
"def decorated_function(*args, **kwargs):\n if not session.get('username'):\n flash('Login to continue', 'warning')\n return redirect(url_for('sign_in', next=request.url))\n return func(*args, **kwargs)",
"def require_user(func):\n\n @wraps(func)\n def decorator(*args, **kwargs):\n if not g.user:\n # flash('此操作需要登录账户')\n return render_template('account/error.html', error='抱歉,您无权进行该操作!')\n # return redirect(url_for('site.login'))\n return func(*args, **kwargs)\n\n return decorator",
"def login_required(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n if 'username' in session:\n return func(*args, **kwargs)\n else:\n flash(\"You must be logged in to access that page.\", 'danger')\n return redirect(url_for('login'))\n return wrapper",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if session.get(\"user_id\") is None:\n return redirect(\"/\")\n return f(*args, **kwargs)\n return decorated_function",
"def login_required(f):\r\n @wraps(f)\r\n def wrap(*args, **kwargs):\r\n if \"logged_in\" in session:\r\n return f(*args, **kwargs)\r\n return redirect(\"/user/login\")\r\n return wrap"
] | [
"0.7650609",
"0.7616408",
"0.7589018",
"0.7559412",
"0.7558609",
"0.7510281",
"0.7494149",
"0.7491331",
"0.7440667",
"0.74237275",
"0.74189824",
"0.74179274",
"0.7398192",
"0.73772556",
"0.7373833",
"0.73519063",
"0.7316154",
"0.7309339",
"0.7299229",
"0.7293337",
"0.7288219",
"0.7288136",
"0.72664815",
"0.7252768",
"0.7223765",
"0.72029126",
"0.71969515",
"0.7176953",
"0.7176953",
"0.71739274"
] | 0.79399925 | 0 |
Log in with an access code | def accesscode(request, code):
employee = Employee.objects.get(access_code=code)
user = employee.user
user.backend = 'django.contrib.auth.backends.ModelBackend'
login(request, user)
return HttpResponseRedirect('/') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def login():",
"def login():",
"def login(self):\n self.open(self.urls['login'])\n self.select_form(nr=0)\n\n self.form['custno'] = self.username\n self.form['password'] = self.password\n res = self.submit()\n \n return res",
"def login():\n url = AUTH_URL + '&state=' + str(uuid1())\n try:\n import webbrowser\n webbrowser.open(url)\n except:\n pass\n \n print('Go to the following link in your browser:\\n\\n\\t{}\\n'.format(url))\n\n auth_code = input('Enter verification code: ')\n print('\\nObtaining access token...')\n Spotify.refresh(auth_code)\n print('Credentials saved to {}'.format(CREDS_PATH))\n return",
"def login(self):",
"def post_login(self, username, password, code):\n values = {\"ctl00$ContentPlaceHolder1$MemberName\" : username,\n \"ctl00$ContentPlaceHolder1$MemberPass\" : password,\n \"ctl00$ContentPlaceHolder1$CheckCode\" : code,\n \"__EVENTTARGET\" : \"\",\n \"__EVENTARGUMENT\" : \"\",\n \"__VIEWSTATE\" : \"/wEPDwUJNjIyMTU5NjYyZGQ=\",\n \"__EVENTVALIDATION\" : \"/wEWBQLwhorcCAKxtMbHCgKNvavEBwKCi/rDCQKM5+qlBA==\",\n \"ctl00$ContentPlaceHolder1$RegBtn\": \"登 录\",\n }\n result = self.data_post(values, self.login_url, self.login_send_header)\n return result",
"def login():\n auth_state = str(uuid.uuid4())\n SESSION.auth_state = auth_state\n\n # For this sample, the user selects an account to authenticate. Change\n # this value to 'none' for \"silent SSO\" behavior, and if the user is\n # already authenticated they won't need to re-authenticate.\n prompt_behavior = 'select_account'\n\n params = urllib.parse.urlencode({'response_type': 'code',\n 'client_id': config.CLIENT_ID,\n 'redirect_uri': config.REDIRECT_URI,\n 'state': auth_state,\n 'resource': config.RESOURCE,\n 'prompt': prompt_behavior})\n\n return bottle.redirect(config.AUTHORITY_URL + '/oauth2/authorize?' + params)",
"def login(self):\n r = self._login_token()",
"def login(self):\n login_form = {\"kid\": \"\",\n \"uni\": self.server,\n \"login\": self.username,\n \"pass\": self.password}\n url = \"https://%s.ogame.gameforge.com/main/login\" % self.country_code\n result = self.session.post(url, data=login_form)",
"def login():\n pass",
"def do_login(self, backend, user):",
"def login(self):\r\n user_account = db.find_one({\"cpr_number\": request.form.get(\"CPR\")})\r\n if user_account is not None:\r\n if self.verify_password(user_account[\"password\"], request.form.get(\"password\")):\r\n return self.start_session(user_account)\r\n return jsonify({\"error\": \"Invalid login credentials\"}), 401",
"def login():\n if app.testing:\n callback_url = url_for('user.authorize', _external=True)\n else:\n callback_url = 'https://codegolf.uqcs.org.au/user/authorize'\n return git_auth.authorize(callback=callback_url)",
"def _login(self, login):\n self._tokens.clear()\n name, password = login\n\n params = {\"action\": \"query\", \"meta\": \"tokens\", \"type\": \"login\"}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n try:\n token = result[\"query\"][\"tokens\"][\"logintoken\"]\n except KeyError:\n raise exceptions.LoginError(\"Couldn't get login token\")\n\n params = {\"action\": \"login\", \"lgname\": name, \"lgpassword\": password,\n \"lgtoken\": token}\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n\n res = result[\"login\"][\"result\"]\n if res == \"Success\":\n self._tokens.clear()\n self._save_cookiejar()\n return\n if res == \"Illegal\":\n e = \"The provided username is illegal.\"\n elif res == \"NotExists\":\n e = \"The provided username does not exist.\"\n elif res == \"EmptyPass\":\n e = \"No password was given.\"\n elif res == \"WrongPass\" or res == \"WrongPluginPass\":\n e = \"The given password is incorrect.\"\n else:\n e = \"Couldn't login; server says '{0}'.\".format(res)\n raise exceptions.LoginError(e)",
"def mfa_login(self, mfacode):\n\n try:\n\n response = self.post(\"/authentication/login\",\n {\"user\": self.user, \"password\": self.password, \"token\": int(mfacode)})\n if response.status_code == 200:\n print(\"{0}: Orchestrator MFA login success\".format(self.url))\n # get and set X-XSRF-TOKEN\n for cookie in response.cookies:\n if cookie.name == \"orchCsrfToken\":\n self.headers[\"X-XSRF-TOKEN\"] = cookie.value\n return True\n else:\n print(\"{0}: Orchestrator MFA login failed: {1}\".format(self.url, response.text))\n return False\n except:\n print(\"{0}: Exception - unable to connect to Orchestrator\".format(self.url))\n return False",
"def make_login(your_username, your_password):\r\n instagram.with_credentials(your_username, your_password)\r\n instagram.login()",
"def login(email, password):\n rino.login.login(email, password)",
"def login(email, password):\n rino.login.login(email, password)",
"def login():\n login_hex = request.json.get(\"authentication\")\n if not login_hex:\n return jsonify({\"code\": \"1\", \"type\": \"user\"})\n\n qr_code_password = app.config[\"QRCODE_PASSWORD\"]\n\n if login_hex != qr_code_password:\n return jsonify({\"code\": \"3\"})\n \n jwt_token = generate_token({\"id\": generate_id()})\n\n return jsonify({\"code\": \"0\", \"token\": jwt_token})",
"def login(self, name, pin):\n self.account = self.bank.get(name, pin)\n if self.account:\n return \"success\"\n else:\n return \"faliure\"",
"def login(self):\n res = self.sess.get(self._login_url)\n execution = re.search('name=\"execution\" value=\"(.*?)\"', res.text).group(1)\n res = self.sess.get(url=self._pub_key_url).json()\n n, e = res['modulus'], res['exponent']\n encrypt_password = rsa_encrypt(self.password, e, n)\n data = {\n 'username': self.username,\n 'password': encrypt_password,\n 'execution': execution,\n '_eventId': 'submit'\n }\n res = self.sess.post(url=self._login_url, data=data)\n\n # check if login successfully\n if '统一身份认证' in res.content.decode():\n self.status = \"FAILED_LOGIN\"\n raise LoginError('Login failed. Please check your ZJU username and password.')\n logger.info(\"%s Successfully logined.\" % self)\n self.status = \"LOGINED\"\n return self.sess",
"def login(self, account, password):\n url = 'https://ceq.nkust.edu.tw/Login'\n\n data = {\n '__RequestVerificationToken': self.csrf_key,\n 'UserAccount': account,\n 'Password': password,\n }\n res = self.main_session.post(url=url, data=data, allow_redirects=False)\n if res.status_code == 302:\n soup = BeautifulSoup(res.text, 'html.parser')\n status = soup.find('a')['href']\n if status == '/StuFillIn':\n return True\n return False",
"def _login(self, *args, **kwargs):\n pass",
"def login(self):\n \n self.br.open(\"http://kanji.koohii.com/login\")\n self.br.form = list(self.br.forms())[0]\n self.br[\"username\"] = USER\n self.br[\"password\"] = PASSWORD\n my_response = self.br.submit()\n print \"Login successful\"",
"def do_login(user, password):\n return um.do_login(user, password)",
"def login(self, response):\n\t\treturn FormRequest.from_response(response,\n\t\t\t formdata={'username': 'scanner1', 'password': 'scanner1'},\n\t\t\t callback=self.check_login_response)",
"def login(self) -> int:\n r = self.session.post(\n self.api_endpoint,\n data={\n \"action\": \"login\",\n \"lgname\": self.user,\n \"lgpassword\": self.password,\n \"format\": \"json\",\n },\n )\n token = json.loads(r.text)[\"login\"][\"token\"]\n r = self.session.post(\n self.api_endpoint,\n data={\n \"action\": \"login\",\n \"lgname\": self.user,\n \"lgpassword\": self.password,\n \"lgtoken\": token,\n \"format\": \"json\",\n },\n )\n if json.loads(r.text)[\"login\"][\"result\"] != \"Success\":\n return -1\n return 0",
"def login(self):\n request = self.REQUEST\n response = request['RESPONSE']\n\n login = request.get('__ac_name', '')\n password = request.get('__ac_password', '')\n\n pas_instance = self._getPAS()\n\n if pas_instance is not None:\n pas_instance.updateCredentials(request, response, login, password)",
"def doLogin(self):\n\t\tlogin_data = urllib.urlencode({\n\t\t\t'operatorName' : self.username,\n\t\t\t'password' : self.password,\n\t\t\t'submit' : 'Iniciar+sesi%C3%B3n',\n\t\t})\n\n\t\tresponse = self.opener.open(\"http://172.16.0.2/tdserver/login_deal.jsp\", login_data)\t\t### deberia devolver verdadero o falso segun se logueo o no",
"def steam_login(username, password, two_factor_code):\n steam_client = SteamClient() # Make steam client object\n steam_client.login(username, password, two_factor_code=two_factor_code) # Log in\n if not steam_client.logged_on: # Login failed\n raise SteamLoginException('Login failed.')\n return steam_client"
] | [
"0.7269786",
"0.7269786",
"0.70734197",
"0.70426136",
"0.6852028",
"0.6803463",
"0.67843354",
"0.6739689",
"0.6730675",
"0.6720502",
"0.6683724",
"0.6669751",
"0.66376764",
"0.65688664",
"0.65634507",
"0.6525562",
"0.651685",
"0.651685",
"0.649879",
"0.64984405",
"0.6491229",
"0.64458877",
"0.64118016",
"0.64095795",
"0.6394115",
"0.6380953",
"0.6379928",
"0.6367268",
"0.63606215",
"0.6346407"
] | 0.74460435 | 0 |
View for all employees (in a company) or for the current user, depending on the employee's role | def all_employees(request, company_id=None):
current_employee = Employee.objects.get(user__pk=request.user.pk)
company_super_user = current_employee.isCompanySuperUserOrHigher()
if company_id:
company = Company.objects.get(pk=company_id)
else:
company = current_employee.company
if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
raise PermissionDenied()
change_company_form = ChangeCompanyForm(initial=dict(company=company))
return TemplateResponse(
request,
'all_employees.html',
{
'user': request.user,
'company_super_user': company_super_user,
'company': company,
'change_company_form': change_company_form,
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)",
"def get_manager_employees(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()\n if manager_employees:\n emp_list=[]\n for emp in manager_employees:\n emp_data={}\n emp_data[\"id\"] = emp.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"manager_id\"] = emp.manager.id\n # emp_data[\"status_questions\"] = emp.status_questions\n # employee_role = EmployeeRole.objects.filter(employee=emp).all()\n # name_role_list = []\n # for obj in employee_role:\n # name_role_list.append(obj.role.name)\n # emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n data = {\"employees:\": emp_list}\n return JsonResponse(status=201, data=data)\n else:\n return JsonResponse(\"The user with id={} isn't a manager for any user\".format(current_employee.user.id),\n status=404)",
"def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)",
"def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado",
"def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )",
"def get_queryset(self):\n #print(\"request\", self.request)\n user = self.request.user\n return Experience.objects.filter(person=user)",
"def action_list(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n actions = employee.action_set.all()\n return TemplateResponse(\n request,\n 'mus/action_list.html',\n dict(\n actions=actions,\n employee=employee\n )\n )",
"def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )",
"def get_queryset(self):\n\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # check if the user is a landville admin and return all records\n # even soft deleted ones\n return PropertyEnquiry.objects.all()\n\n if user.is_authenticated and user.role == 'CA':\n # if the user is a client admin, return only his records\n employer = user.employer.first()\n return PropertyEnquiry.active_objects.for_client(client=employer)\n\n # if the user is a buyer, return also only his enquiries\n return PropertyEnquiry.active_objects.for_user(user=user)",
"def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)",
"def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return",
"def employee():\n return Response(render_template('employee/employee.html'))",
"def get_role_from_rolequeryset(self, role):\n role = super().get_role_from_rolequeryset(role=role)\n\n requesting_appname = self.request.cradmin_app.appname\n if requesting_appname in ['qualifiesforexam', 'overview_all_results']:\n if self.period_admin_access_semi_anonymous_assignments_restricted(period=role):\n raise Http404()\n\n return role",
"def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)",
"def get_queryset(self):\n qs = super().get_queryset()\n qs.filter(company=self.request.user.company)\n return qs",
"def stars_employee_list(request, employee_id):\n if request.method == 'GET':\n employee = get_object_or_404(Employee, pk=employee_id)\n employee_stars = Star.objects.filter(to_user=employee)\n paginator = PageNumberPagination()\n results = paginator.paginate_queryset(employee_stars, request)\n serializer = StarSerializer(results, many=True)\n return paginator.get_paginated_response(serializer.data)",
"def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)",
"def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)",
"def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')",
"def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False",
"def get_employee_permissions(user_name: str, store_name: str, employee_name: str):\n user_name = auth.get_username_from_hash(user_name)\n permission_handler.is_permmited_to(user_name, Action.EMPLOYEE_PERMISSIONS, store_name)\n permission_handler.is_working_in_store(employee_name, store_name)\n return user_handler.get_employee_information(\n employee_name) # TODO FOR NOW RETURN INFORMATION MAYBE TO CHANGE TO NEW FUNCTION",
"def get_queryset(self):\n\n user = self.request.user\n\n if user.role == 'LA':\n return PropertyEnquiry.objects.all()\n\n # check if the user is a client admin\n # and return all enquiries made on his/her property\n if user.role == 'CA':\n return PropertyEnquiry.active_objects.for_client(\n client=user.employer.first())\n\n # else if the user is a buyer return only\n # the records that are associated with him/her\n return PropertyEnquiry.active_objects.for_user(user=user)",
"def get_employees(self, active_only):\n cursor = self.dbconnect.get_cursor()\n\n if active_only:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee WHERE is_active = TRUE')\n else:\n cursor.execute(\n 'SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external, '\n 'is_admin, is_active FROM employee')\n\n employees = list()\n for row in cursor:\n obj = Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n employees.append(obj)\n return employees",
"def get_employees(self):\n return self.employees"
] | [
"0.75012994",
"0.676221",
"0.675863",
"0.66391176",
"0.659517",
"0.65067995",
"0.63182765",
"0.6227569",
"0.62267536",
"0.6176886",
"0.61206603",
"0.6090563",
"0.6077315",
"0.60686785",
"0.6056699",
"0.59416133",
"0.5920941",
"0.5915269",
"0.5865308",
"0.5852588",
"0.5848536",
"0.5825259",
"0.5797603",
"0.5784566",
"0.5777118",
"0.5758369",
"0.5735553",
"0.56972456",
"0.56829065",
"0.56693107"
] | 0.7280209 | 1 |
View for all employees the current user is a manager for with an empty development plan | def get_manager_employees(request):
current_employee = Employee.objects.get(user__pk=request.user.pk)
manager_employees = Employee.objects.filter(manager=current_employee, development_plan_type=None).all()
if manager_employees:
emp_list=[]
for emp in manager_employees:
emp_data={}
emp_data["id"] = emp.id
emp_data["username"] = emp.user.username
emp_data["first_name"] = emp.user.first_name
emp_data["last_name"] = emp.user.last_name
emp_data["manager_id"] = emp.manager.id
# emp_data["status_questions"] = emp.status_questions
# employee_role = EmployeeRole.objects.filter(employee=emp).all()
# name_role_list = []
# for obj in employee_role:
# name_role_list.append(obj.role.name)
# emp_data["roles"] = name_role_list
emp_list.append(emp_data)
data = {"employees:": emp_list}
return JsonResponse(status=201, data=data)
else:
return JsonResponse("The user with id={} isn't a manager for any user".format(current_employee.user.id),
status=404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider 
in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)",
"def get_queryset(self, request):\n return models.Employee.objects.exclude(username='root')",
"def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)",
"def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def test_ReportingPeriodDetailView_current_employee_set_false(self):\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 2\n )",
"def employees_manager(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n manager_list = Employee.objects.filter(manager=request.user.employee_user, is_manager=True)\n employee = Employee.objects.get(pk=request.user.employee_user.id)\n employee_dict = model_to_dict(employee)\n employee_dict['first_name'] = employee.user.first_name\n employee_dict['last_name'] = employee.user.last_name\n employee_dict['photo'] = employee.photo.url if employee.photo else ''\n print employee_dict\n if len(manager_list) > 0:\n result_list = list(manager_list)\n all_managers_list = found_all_managers(manager_list, result_list)\n else:\n data = {\"employee_managers\": employee_dict}\n return JsonResponse(data=data, content_type='application/json', safe=False)\n employees = list()\n for manager in all_managers_list:\n manager_dict = model_to_dict(manager)\n manager_dict['first_name'] = manager.user.first_name\n manager_dict['last_name'] = manager.user.last_name\n manager_dict['photo'] = manager.photo.url if manager.photo else ''\n employees.append(manager_dict)\n employees.append(employee_dict)\n\n data = {\"employee_managers\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)",
"def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n 
comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)",
"def start_view(request):\n\n if request.user and Employee.objects.filter(user__pk=request.user.pk).exists():\n if Employee.objects.get(user__pk=request.user.pk).is_manager:\n return HttpResponseRedirect('/dashboard')\n else:\n return HttpResponseRedirect('/employee/show/%d/' % request.user.employee_user.first().pk)\n else:\n return HttpResponseRedirect('/login/')",
"def is_manager(self) -> bool:\n return self.role in EmployeeRole.manager_roles()",
"def is_employee():\n return _is_member('uw_employee')",
"def test_managers_who_does_nothing(self):\n # Add 2 managers who do nothing\n self.manager_id = self._add_person(\"Manager\", \"ARRAY['Database']\", 30)\n self.manager_id1 = self._add_person(\"Manager\", \"ARRAY['AI']\", 30)\n\n # Run the query\n q = self.generate_query('view_manager_report', ())\n res = self.execute_query(q)\n assert len(res) == 2, f'There is suppose to be 2 entries {res}'",
"def edit_employee(request, employee_id):\n employee = Employee.objects.get(pk=int(employee_id))\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n\n assert isinstance(employee, Employee)\n assert isinstance(current_employee, Employee)\n\n # if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n # raise PermissionDenied()\n\n if not current_employee.hasAccessTo(employee):\n raise PermissionDenied()\n\n form = EditEmployeeForm(request.user, employee, {\n 'first_name': employee.user.first_name,\n 'last_name': employee.user.last_name,\n 'email': employee.user.email,\n 'manager': employee.manager.id if employee.manager else 0,\n 'language_code': employee.language_code,\n # 'development_plan_type': employee.development_plan_type.id,\n 'is_manager': employee.is_manager\n })\n if 'manager' in form.fields:\n managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)\n form.fields['manager'].queryset = managerQS\n # form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(\n # Q(company__pk=employee.company.pk) | Q(company__isnull=True)\n # )\n is_me = employee.user.pk == request.user.pk\n return TemplateResponse(\n request,\n 'mus/edit_employee_form.html',\n {\n 'edit_employee_form': form,\n 'employee_id': employee_id,\n 'me': is_me,\n 'name': employee.user.get_full_name()\n }\n )",
"def get_job_query(self):\n context = aq_inner(self.context)\n catalog = getToolByName(context, 'portal_catalog')\n mt = getToolByName(self, 'portal_membership') \n currentUser = mt.getAuthenticatedMember() \n \n if \"Site Administrators\" not in currentUser.getGroups():\n\treturn catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job', \t\t\t\t Creator = currentUser.getUserName())\n else: \n return catalog.searchResults(portal_type= 'SeniorProject.PloneAddOn.job')",
"def index(request):\n\n context = {'employees': User.objects.select_related('profile').filter(is_staff=True).order_by('first_name')}\n return render(request, 'Employees/index.html', context)",
"def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)",
"def test_ReportingPeriodDetailView_current_employee_toggle(self):\n self.former_employee.user_data.current_employee = True\n self.former_employee.user_data.save()\n response = self.app.get(\n reverse(\n 'reports:ReportingPeriodDetailView',\n kwargs={'reporting_period': '2015-01-01'},\n )\n )\n self.assertEqual(\n len(response.html.find_all('tr', {'class': 'user'})), 3\n )\n self.former_employee",
"def get_queryset(self, request):\n qs = super(EventAdmin, self).get_queryset(request)\n if request.user.is_superuser:\n return qs\n return qs.filter(dep=request.user.profile.department)",
"def test_returns_200_if_user_org_manager(self):\n # Add user to organisation so that it has PM role\n add_manager_to_organisation(self.test_organisation, self.test_author)\n # Act\n response = self.client.get(\n self.url, headers={\"Authorization\": self.author_session_token}\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"activeProjects\"]), 1)",
"def get_queryset(self):\n\n user = self.request.user\n\n if user.is_authenticated and user.role == 'LA':\n # check if the user is a landville admin and return all records\n # even soft deleted ones\n return PropertyEnquiry.objects.all()\n\n if user.is_authenticated and user.role == 'CA':\n # if the user is a client admin, return only his records\n employer = user.employer.first()\n return PropertyEnquiry.active_objects.for_client(client=employer)\n\n # if the user is a buyer, return also only his enquiries\n return PropertyEnquiry.active_objects.for_user(user=user)",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def user(self, request):\n\t\t#return super(configManager, self).get_queryset().filter(models.Q(author=request.user) | models.Q(is_public=True)) ## For public showing, temporarily disabled\n\t\treturn super(configManager, self).get_queryset().filter(author=request.user)",
"def filter_queryset(self, queryset):\n user = self.request.user\n if user.is_superuser:\n return super().filter_queryset(queryset)\n return queryset.filter(collaborators=user)",
"def allow_egap_admins(queryset, request):\n if hasattr(request, 'user') and not waffle.flag_is_active(request, EGAP_ADMINS):\n return queryset.exclude(name='EGAP Registration')\n return queryset",
"def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return",
"def admin(request):\n if not request.user.is_staff:\n return render(request, 'manager/denied.html')\n return render(request, 'manager/index.html')",
"def is_usermanager(self):\n return False",
"def get_employee(self):\n employee_ids = self.env['hr.employee'].search([('user_id', '=', self.env.uid)])\n return employee_ids[0] if employee_ids else False",
"def employees_json(request):\n # current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee_list = Employee.objects.filter(manager=request.user.employee_user)\n employees = list()\n for employee in employee_list:\n manager_dict = model_to_dict(employee)\n manager_dict['first_name'] = employee.user.first_name\n manager_dict['last_name'] = employee.user.last_name\n employees.append(manager_dict)\n data = {\"employees\": employees}\n return JsonResponse(data=data, content_type='application/json', safe=False)"
] | [
"0.7106553",
"0.6753194",
"0.674842",
"0.6595502",
"0.6566336",
"0.63812053",
"0.61806154",
"0.61537486",
"0.612033",
"0.60830206",
"0.60527897",
"0.60266685",
"0.596054",
"0.5893149",
"0.5841044",
"0.57119644",
"0.56820273",
"0.56635785",
"0.5655162",
"0.5615645",
"0.5613845",
"0.5602607",
"0.5572919",
"0.554382",
"0.5536185",
"0.55342567",
"0.55089074",
"0.54941607",
"0.54308563",
"0.542971"
] | 0.73281884 | 0 |
View for creating an employee in a company | def create_employee(request, company_id):
company = Company.objects.get(pk=company_id)
current_employee = Employee.objects.get(user__pk=request.user.pk)
if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:
logUnauthorizedAccess("User tried to create_employee", request)
raise PermissionDenied()
form = EmployeeForm(request, initial=dict(company=company))
form.fields['manager'].queryset = Employee.objects.filter(is_manager=True, company=company)
# form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
# Q(company=company) | Q(company__isnull=True))
# data = {
# 'employee_form': form.cleaned_data,
# 'company': company.cleaned_data["name"]
# }
return TemplateResponse(
request,
'mus/create_employee_form.html',
{
'employee_form': form,
}
)
# data = {
# 'employee_form': form.cleaned_data,
# 'company': company.cleaned_data["name"]
# }
# return JsonResponse(status=200, data=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )",
"def employee():\n return Response(render_template('employee/employee.html'))",
"def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))",
"def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))",
"def company():\n\n company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')\n return company",
"def create_many_employees(request, company_id=None):\n company = Company.objects.get(pk=company_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n if \"upload\" in request.POST:\n form = UploadEmployeesForm(request.POST, request.FILES)\n if form.is_valid():\n data = csv_to_dict(request.FILES['file'])\n request.session['upload_employees'] = data\n return JsonResponse(status=201, data=form.cleaned_data)\n # return TemplateResponse(\n # request,\n # 'mus/create_many_employees_uploaded.html',\n # dict(data=data, company=company)\n # )\n elif \"next\" in request.POST:\n data = request.session['upload_employees']\n marked_data = list()\n fields = request.POST.getlist('field[]')\n for row in data:\n new_row = dict(is_manager=False)\n for i, item in enumerate(row):\n field_id = int(fields[i])\n if field_id == 1:\n new_row['first_name'] = item\n elif field_id == 2:\n new_row['last_name'] = item\n elif field_id == 3:\n p = item.partition(\" \")\n new_row['first_name'] = p[0]\n new_row['last_name'] = p[2]\n elif field_id == 4:\n new_row['email'] = item\n elif field_id == 5:\n new_row['username'] = item\n marked_data.append(new_row)\n formset = EmployeeRowFormSet(initial=marked_data)\n # TypeQS = DevelopmentPlanType.objects.filter(Q(company=company) | Q(company__isnull=True))\n # for form in formset:\n # form.fields['development_plan_type'].queryset = TypeQS\n return TemplateResponse(\n request,\n 'mus/create_many_employees_form.html',\n dict(formset=formset, company=company)\n )\n elif \"next2\" in request.POST:\n formset = EmployeeRowFormSet(request.POST)\n if formset.is_valid():\n data = list()",
"def create(self, request):\n serializer = data_serializers.CreateEmployeeSerializer(data=request.data)\n\n if serializer.is_valid(raise_exception=True):\n request_data = serializer.save()\n print(F\"Request employee Data: {serializer.data}\")\n\n try:\n new_employee = self.controller.create_employee(request_data=request_data)\n serializer = data_serializers.PresentEmployeeDataSerializer(new_employee)\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except (domain_exceptions.EmployeeIDIsNotUnique,\n domain_exceptions.WorkArrangementPercentageOutOfRange,\n domain_exceptions.TeamDoesNotExist,\n domain_exceptions.TeamHasALeader,\n domain_exceptions.WorkArrangementPercentageNull\n ) as e:\n return Response(e.message, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)",
"def create_employee(self):\n try:\n name = input(\"Enter name: \")\n if not name.isalpha():\n print(\"Invalid data format. Name should contain only alphabets. \")\n return False\n email = input(\"Enter email: \")\n if not InputValidations.validate_email(email):\n return False\n employee = EmployeeModel(name=name, email=email)\n self.admin_repository.create_employee(employee)\n print(\"Employee created successfully!\")\n return True\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def create_calendar(request):\n if request.method == 'POST':\n\n form = CalendarForm(request.POST)\n \n if form.is_valid():\n calendar = form.save(commit=False) # prvent form from saving since we need to link company\n calendar.company = request.user.company\n calendar.save()\n return redirect('appointment:calendar_list')\n else:\n form = CalendarForm()\n return render(request, 'calendar_form.html', {'form': form})",
"def competitors_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n competitors_form = CompetitorsForm()\n return render_to_response('competitors_form.html', {'form': competitors_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n competitors_form = CompetitorsForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if competitors_form.is_valid():\n cf = competitors_form.save(commit=False)\n\n #verify if other companies with the same info exists anywhere\n try: \n comparison = Competitors.objects.get(name=cf.name,company= company)\n \n if str(comparison.name) != str(cf.name):\n cf.company = company\n cf.save()\n \n else:\n form_errors = {\"Name - The competitor \" + str(comparison.name).capitalize() + \" has been already created for \"+ str(company.name).capitalize() + \".\"}\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': form_errors, 'company':company},\n context_instance=RequestContext(request))\n\n except Competitors.DoesNotExist :\n cf.company = company\n cf.save()\n\n\n \n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': competitors_form.errors, 'company':company},\n context_instance=RequestContext(request))",
"def all_employees(request, company_id=None):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n company_super_user = current_employee.isCompanySuperUserOrHigher()\n if company_id:\n company = Company.objects.get(pk=company_id)\n else:\n company = current_employee.company\n if not current_employee.isEnsoUser() and current_employee.company.pk != company.pk:\n raise PermissionDenied()\n change_company_form = ChangeCompanyForm(initial=dict(company=company))\n return TemplateResponse(\n request,\n 'all_employees.html',\n {\n 'user': request.user,\n 'company_super_user': company_super_user,\n 'company': company,\n 'change_company_form': change_company_form,\n }\n )",
"def post(self):\n try:\n employee = self.service.add_employee(self.schema, request.json)\n except ValidationError as error:\n return error.messages, 400\n return self.schema.dump(employee), 201",
"def index(request):\n return render(request, 'companies/index.html', {'companyform': CompanyForm()})",
"def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)",
"def get(self,request,*args,**kwargs):\n\n\t\tsucursal = Sucursal.objects.get(id=kwargs['spk'])\n\n\t\tuser_form = UserForm()\n\t\templeado_form = EmpleadoForm( initial={'sucursal':sucursal.id} )\n\n\t\tforms = [user_form,empleado_form]\n\t\tcontext = {\n\t\t'section_title':'Nuevo Empleado',\n\t\t'button_text':'Crear',\n\t\t'sucursal':sucursal,\n\t\t'user_form':user_form,\n\t\t'empleado_form':empleado_form }\n\n\t\treturn render_to_response(\n\t\t\t'empleado/empleado_form.html',\n\t\t\tcontext,\n\t\t\tcontext_instance=RequestContext(request))",
"def certification_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n certification_form = CertificationForm()\n return render_to_response('certification_form.html', {'form': certification_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n certification_form = CertificationForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if certification_form.is_valid():\n of = certification_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('certification_form.html', \n {'form': certification_form, 'form_errors': certification_form.errors, 'company':company},\n context_instance=RequestContext(request))",
"def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )",
"def customer_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n customer_form = CustomerForm()\n return render_to_response('customer_form.html', {'form': customer_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n customer_form = CustomerForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if customer_form.is_valid():\n of = customer_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'company':company},\n context_instance=RequestContext(request))",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)",
"def create_employee_from_applicant(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n hr_employee = self.pool.get('hr.employee')\n model_data = self.pool.get('ir.model.data')\n act_window = self.pool.get('ir.actions.act_window')\n emp_id = False\n for applicant in self.browse(cr, uid, ids, context=context):\n address_id = contact_name = False\n if applicant.partner_id:\n address_id = self.pool.get('res.partner').address_get(cr, uid, [applicant.partner_id.id], ['contact'])['contact']\n contact_name = self.pool.get('res.partner').name_get(cr, uid, [applicant.partner_id.id])[0][1]\n if applicant.job_id and (applicant.partner_name or contact_name):\n applicant.job_id.write({'no_of_hired_employee': applicant.job_id.no_of_hired_employee + 1})\n create_ctx = dict(context, mail_broadcast=True)\n\n pes=self.browse(cr,uid,ids)[0]\n coy=pes.partner_name\n\n ##### Susunan Keluarga ayah/ibu #####\n le=self.pool.get('hr_recruit.suskel1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context)\n prod_ids=[] \n for pr in lele:\n prod_ids.append((0,0, {'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan,'susunan':pr.susunan}))\n \n ###### Susunan Keluarga Suami/istri #####\n le=self.pool.get('hr_recruit.suskel2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids1=[] \n for pr in lele:\n prod_ids1.append((0,0, {'susunan':pr.susunan,'name':pr.name,'kelamin':pr.kelamin,'kota_id':pr.kota_id.id,'tgl_lahir':pr.tgl_lahir,'type_id':pr.type_id.id,'pekerjaan':pr.pekerjaan})) \n \n ###### riwayat Pendidikan #######\n le=self.pool.get('hr_recruit.rwt_pend')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids2=[] \n for pr in lele:\n prod_ids2.append((0,0, {'name':pr.name,'jurusan':pr.jurusan.id,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'ijazah':pr.ijazah.id})) \n \n ###### bahasa ######\n le=self.pool.get('hr_recruit.bahasa')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids3=[] \n for pr in lele:\n prod_ids3.append((0,0, {'name':pr.name.id,'tulis':pr.tulis.id,'lisan':pr.lisan.id})) \n \n ##### Riwayat Pekerjaan ####\n le=self.pool.get('hr_recruit.rwt_krj')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids4=[] \n for pr in lele:\n prod_ids4.append((0,0, {'no':pr.no,'name':pr.name,'tempat':pr.tempat,'tahun_msk':pr.tahun_msk,'tahun_klr':pr.tahun_klr,'jabatan':pr.jabatan,'gaji':pr.gaji,'alasan':pr.alasan})) \n \n ###### Koneksi Internal #####\n le=self.pool.get('hr_recruit.kon1')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids5=[] \n for pr in lele:\n prod_ids5.append((0,0, {'employee_id':pr.employee_id.name,'alamat':pr.alamat,'job_id':pr.job_id.id,'telepon':pr.telepon})) \n \n ###### Koneksi Eksternal ####\n le=self.pool.get('hr_recruit.kon2')\n lel=le.search(cr,uid,[('applicant_id','=',coy)])\n lele=le.browse(cr,uid,lel,context=context) \n prod_ids6=[]\n for pr in lele: \n prod_ids6.append((0,0, {'name':pr.name,'alamat':pr.alamat,'jabatan':pr.jabatan,'telepon':pr.telepon})) \n\n ####### create Employee ######## \n emp_id = hr_employee.create(cr, uid, {'name': applicant.partner_name or applicant.name,\n 'job_id': applicant.job_id.id,\n 'department_id' : 
applicant.department_id.id,\n 'address_id2' : applicant.job_id.address_id.id,\n #### informasi Probadi ####\n 'kelamin':applicant.jen_kel,\n 'blood' : applicant.blood,\n 'agama' : applicant.agama_id.id,\n 'birthday' : applicant.tgl_lahir,\n 'place_of_birth' : applicant.kota_id.name,\n 'marital':applicant.status,\n 'sjk_tanggal' : applicant.sjk_tanggal,\n 'mobile_phone':applicant.partner_phone,\n 'country_id' : applicant.country_id.id,\n\n #### Pendidikan ####\n 'type_id':applicant.type_id.id,\n 'bid_id':applicant.bidang_id.id,\n 'jurusan_id':applicant.jurusan_id.id,\n 'pt_id':applicant.pt_id.id,\n 'gelar_id':applicant.gelar_id.id,\n\n #### alamat DOmisili ####\n 'country_id1':applicant.country_id1.id,\n 'prov_id':applicant.prov_id.id,\n 'kab_id' : applicant.kab_id.id,\n 'kec_id':applicant.kec_id.id,\n 'alamat1' : applicant.alamat1,\n 'kodepos' :applicant.kode1,\n 'telp1' : applicant.telp1,\n\n #### kartu identitas ####\n 'jenis_id': applicant.jenis_id,\n 'ktp' : applicant.no_id,\n 'tgl_berlaku' : applicant.tgl_berlaku,\n # 'issued_id' : applicant.dikeluarkan.id,\n \n #### Alamat Sesuai KTP #### \n 'country_id2':applicant.country_id2.id,\n 'prov_id2':applicant.prov_id2.id,\n 'kab_id2':applicant.kab_id2.id,\n 'kec_id2':applicant.kec_id2.id,\n 'alamat2' : applicant.alamat2,\n 'kodepos1':applicant.kode2,\n 'telp2' : applicant.telp2,\n \n # 'status': applicant.status,\n #### IDS ####\n 'susunan_kel1_ids' : prod_ids,\n 'susunan_kel2_ids':prod_ids1,\n 'rwt_pend_ids':prod_ids2,\n 'bahasa_ids':prod_ids3,\n 'rwt_krj_ids':prod_ids4,\n 'koneksi1_ids':prod_ids5,\n 'koneksi2_ids':prod_ids6, \n })\n self.write(cr, uid, [applicant.id], {'emp_id': emp_id}, context=context)\n self.pool['hr.job'].message_post(\n cr, uid, [applicant.job_id.id],\n body=_('New Employee %s Hired') % applicant.partner_name if applicant.partner_name else applicant.name,\n subtype=\"hr_recruitment.mt_job_applicant_hired\", context=context)\n else:\n raise osv.except_osv(_('Warning!'), _('You must define an Applied Job and a Contact Name for this applicant.'))\n\n action_model, action_id = model_data.get_object_reference(cr, uid, 'hr', 'open_view_employee_list')\n dict_act_window = act_window.read(cr, uid, [action_id], [])[0]\n if emp_id:\n dict_act_window['res_id'] = emp_id\n dict_act_window['view_mode'] = 'form,tree'\n return dict_act_window",
"def createEmployee():\n form = CreateEmployeeForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n staff = Staff(first_name=form.first_name.data, last_name=form.last_name.data, password=hashed_password, \n email=form.email.data, role=form.role.data, location=form.location.data)\n db.session.add(staff)\n db.session.commit()\n flash(f'Employee Added To Database', category='Success')\n return redirect(url_for('login'))\n return render_template('new_employee.html', title=\"Register\", form=form)",
"def employee_add(request, user_id):\n\n if request.method == \"GET\":\n employee = User.objects.get(pk=user_id)\n orgs = Organization.objects.filter(administrator=request.user)\n\n #desig_list = orgs.values_list('designations', flat=True)\n #designations = Designation.objects.filter(pk__in=desig_list)\n\n return render_to_response('organization/employee_add1.html',\n {'employee':employee, 'orgs': orgs},\n context_instance=RequestContext(request))\n\n user_id = request.POST.get('employee_id')\n org = request.POST.get('add2orgs') # id of selected org\n designations_list = request.POST.getlist('designations2add')#list of ids of selected designation\n\n try:\n user = User.objects.get(pk=user_id)# emp obj to add\n org = Organization.objects.get(pk=org)#selected org objects\n designations = Designation.objects.filter(pk__in=designations_list)#convert desig id in obj\n except Exception, e:\n # log this error\n print str(e)\n messages.error(request, str(e))\n user = org = designations = None\n\n if not (user and designations.count()):\n # add error message.\n messages.error(request, \"Select atleast one Organization and its Designation\")\n # redirect to the same page.\n return HttpResponseRedirect(request.path)\n\n #def send_email():\n # template = get_template('organization/confirmation_email.txt')\n # context = Context({'user':user,'org':o.organization,'desig':desig})\n # subject = u'confirmation email'\n # message = template.render(context)\n # send_mail(subject,message,settings.DEFAULT_FROM_EMAIL,['ravirajmca12@gmail.com'])\n\n # create visiting card(s) for the employee\n try:\n l=[]\n for desig in designations:\n if desig in org.designations.all():\n o,c = VisitingCards.objects.get_or_create(organization=org,\n designation=desig, user=user)\n\n if c:\n #send_email()\n template = get_template('organization/confirmation_email.txt')\n context = Context({'user':user,'org':o.organization,'desig':desig})\n subject = u'confirmation email'\n message = template.render(context)\n msg = (subject,message,settings.DEFAULT_FROM_EMAIL, ['ravirajmca12@gmail.com'])\n l.append(msg)\n #t=threading.Thread(target=send_email)\n #t.start()\n messages.success(request,\"Employee Added Succefully..!\")\n else:\n messages.info(request,\" %s Already Added ..!\" % user.username)\n else:\n messages.error(request,\"%s Doesnt have selected designation %s .!\" %(orgs, desig))\n raise Exception()\n if c:\n tupl=tuple(l)\n threading.Thread(send_mass_mail(tupl, fail_silently=False)).start()\n messages.success(request,\"Employee Added Succefully..!\")\n\n\n\n except:\n messages.error(request,\"something went wroung \")\n return HttpResponseRedirect(request.path)\n\n return HttpResponseRedirect(\"/org/user/list\")",
"def add_employee():\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n form = SignUp_Form()\n \n\n if form.validate_on_submit():\n try: \n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data, \n is_admin = form.is_admin.data,\n )\n\n db.session.add(employee)\n\n db.session.commit()\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"/admin/add_user.html\", form = form)\n\n flash(\"Employee Added!\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/add_user.html\", form = form)",
"def add(request):\n if request.method == 'POST':\n form = CompanyForm(request.POST, request.FILES)\n if form.is_valid():\n form.instance.owner = request.user\n form.save()\n url = reverse('companies_list_all')\n return HttpResponseRedirect(url)\n else:\n form = CompanyForm()\n\n context = dict(form=form)\n return render(request, 'companies/add.html', context)",
"def test_website_companies_create(self):\n pass",
"def detail(request, company_id):\n company = get_object_or_404(Company, pk=company_id)\n\n company_form = CompanyForm(instance=company)\n contact_form = ContactCreationForm()\n\n return render(request, 'companies/detail.html', {\n 'company_detail': company,\n 'company_form': company_form,\n 'contact_form': contact_form\n })",
"def create_company(self):\n self.driver.get(f'{self.base_url}/company-register')\n\n # Fill the company name\n enter_random_string = WebDriverWait(self.driver, 20).until(expected_conditions.presence_of_element_located((By.NAME, 'companyName')))\n enter_random_string.send_keys(self.random_string)\n\n # Press \"Save and Continue\"\n self.driver.find_element_by_xpath('/html/body/div[1]/div/div[3]/div/div[2]/div/div[2]/div[2]/div[2]/div/button').click()\n\n # Wait for the page to load (5 seconds)\n sleep(5)",
"def new_computer(request):\n\n if request.method == \"POST\":\n\n try:\n make = request.POST[\"make\"]\n model = request.POST[\"model\"]\n serial_no = request.POST[\"serial_no\"]\n purchase_date = request.POST[\"purchase_date\"]\n employee_id = request.POST[\"employee\"]\n employee = Employee.objects.get(pk=employee_id)\n\n if make is \"\" or model is \"\" or serial_no is \"\" or purchase_date is \"\":\n return render(request, \"agileHR/computer_new.html\", {\n \"error_message\": \"Please fill out all fields\",\n \"make\": make,\n \"model\": model,\n \"serial_no\": serial_no,\n \"purchase_date\": purchase_date\n })\n else:\n now = datetime.datetime.now()\n new_computer = Computer(make=make, model=model, serial_no=serial_no, purchase_date=purchase_date)\n new_computer.save()\n join = EmployeeComputer.objects.create(\n computer = new_computer,\n employee = employee,\n date_assigned = now\n )\n join.save()\n\n return HttpResponseRedirect(reverse(\"agileHR:computer_detail\", args=(new_computer.id,)))\n except KeyError:\n return render(request, \"agileHR/computer_new.html\", {\n \"error_message\": \"Please fill out all fields\"\n })\n else:\n # Get all computer assignment history\n computer_assignments = EmployeeComputer.objects.all()\n\n # Get employees who have had a computer but do not currently have one.\n need_computers = Employee.objects.exclude(employeecomputer__date_revoked=None).order_by('last_name')\n\n # Get employees who have never had a computer.\n never_computers = Employee.objects.exclude(employeecomputer__in=computer_assignments).order_by('last_name')\n\n # Combine the two querysets\n final_list = need_computers | never_computers\n\n context = {\n \"employees\": final_list\n }\n\n return render(request, \"agileHR/computer_new.html\", context)",
"def action_add(request, employee_id=None):\n if employee_id:\n employee = Employee.objects.get(pk=employee_id)\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n else:\n employee = request.user.employee_user.first()\n if request.method == 'POST':\n form = ActionForm(request.POST)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n else:\n form = ActionForm()\n return TemplateResponse(\n request,\n 'mus/action_edit.html',\n dict(\n form=form\n )\n )"
] | [
"0.6837375",
"0.67006433",
"0.66819894",
"0.6620597",
"0.6475171",
"0.64458215",
"0.6421396",
"0.6402227",
"0.6344205",
"0.6299245",
"0.626302",
"0.6249063",
"0.6230102",
"0.6173451",
"0.61211884",
"0.6115985",
"0.60931104",
"0.6089694",
"0.60648185",
"0.60492533",
"0.6024815",
"0.6018779",
"0.60011125",
"0.5945085",
"0.5935821",
"0.5935559",
"0.5907307",
"0.58863086",
"0.5843938",
"0.5832742"
] | 0.8076184 | 0 |
View for editing employee | def edit_employee(request, employee_id):
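    # Look up the employee being edited and the requesting user's own employee record.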
employee = Employee.objects.get(pk=int(employee_id))
current_employee = Employee.objects.get(user__pk=request.user.pk)
assert isinstance(employee, Employee)
assert isinstance(current_employee, Employee)
# if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
# raise PermissionDenied()
if not current_employee.hasAccessTo(employee):
raise PermissionDenied()
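    # Build the edit form from the employee's current account details.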
form = EditEmployeeForm(request.user, employee, {
'first_name': employee.user.first_name,
'last_name': employee.user.last_name,
'email': employee.user.email,
'manager': employee.manager.id if employee.manager else 0,
'language_code': employee.language_code,
# 'development_plan_type': employee.development_plan_type.id,
'is_manager': employee.is_manager
})
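    # Restrict the manager choices to managers within the employee's company.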
if 'manager' in form.fields:
managerQS = Employee.objects.filter(is_manager=True, company__pk=employee.company.pk)
form.fields['manager'].queryset = managerQS
# form.fields['development_plan_type'].queryset = DevelopmentPlanType.objects.filter(
# Q(company__pk=employee.company.pk) | Q(company__isnull=True)
# )
is_me = employee.user.pk == request.user.pk
return TemplateResponse(
request,
'mus/edit_employee_form.html',
{
'edit_employee_form': form,
'employee_id': employee_id,
'me': is_me,
'name': employee.user.get_full_name()
}
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def edit_employee(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_User_Form(obj = employee)\n \n #form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n #form.certs.choices = db.session.query(Certs.id , Certs.cert_name).all()\n\n if form.validate_on_submit():\n \n employee.email = form.email.data, \n employee.first_name = form.first_name.data,\n employee.last_name = form.last_name.data,\n employee.hire_date = form.hire_date.data, \n employee.is_admin = form.is_admin.data\n\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_user.html\", employee = employee, form = form)",
"def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))",
"def edit_person(self, pk):",
"def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})",
"def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )",
"def edit(self, **kwargs):\n ...",
"def edit(self):\n\n pass",
"def edit_employee_hours(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n form = Edit_Hours_Form(obj = employee)\n\n if form.validate_on_submit():\n \n employee.completed = form.completed.data, \n employee.required = form.required.data,\n \n db.session.commit()\n \n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n\n return render_template(\"/admin/edit_hours.html\", employee = employee, form = form)",
"def task_edit(request, pk):\n task_manager = TaskManager.objects.get(id=pk)\n task = task_manager.task\n if request.method == 'POST':\n \ttask_form = TaskForm(request.POST)\n \ttask_owner = request.user\n\n \tif task_form.is_valid():\n \t\ttask_name = task_form.cleaned_data.get('task_name')\n \t\ttask_description = task_form.cleaned_data.get('task_description')\n\n \t\tif task_manager.task_owner == task_owner:\n \t\t\ttask.task_name = task_name\n \t\t\ttask.task_description = task_description\n \t\t\ttask.save()\n \t\t\treturn redirect('task_list')\n else:\n \tform = TaskForm(instance=task)\n\n context = {'form': form, 'task_manager':task_manager}\n return render(request, 'tasker/task_edit.html', context)",
"def edit_form():\n return template (\"edit\")",
"def editar_empresa(id):\n cadastrando_empresa = False\n\n empresa = Empresa.query.get_or_404(id)\n form = EditarEmpresaForm(obj=empresa)\n\n if form.validate_on_submit():\n empresa.nome = form.nome.data\n empresa.simbolo = form.simbolo.data\n empresa.regiao = form.regiao.data\n empresa.tipo = form.tipo.data\n empresa.abertura = form.abertura.data\n empresa.fechamento = form.fechamento.data\n empresa.zona = form.zona.data\n empresa.moeda = form.moeda.data\n db.session.commit()\n flash('Empresa editada com sucesso!')\n\n return redirect(url_for('home.listar_empresas'))\n\n form.nome.data = empresa.nome\n form.simbolo.data = empresa.abertura \n form.regiao.data = empresa.regiao\n form.tipo.data = empresa.tipo\n form.abertura = empresa.abertura\n form.fechamento = empresa.fechamento\n form.zona.data = empresa.zona\n form.moeda.data = empresa.moeda\n\n\n return render_template('home/empresa.html', action=\"Edit\",\n cadastrando_empresa=cadastrando_empresa, form=form,\n empresa=empresa, title=\"Editar empresa\")",
"def UpdateEmployee():\n staff = current_user\n form = UpdateEmployeeForm()\n if form.validate_on_submit():\n staff.first_name=form.first_name.data.lower()\n staff.last_name=form.last_name.data.lower()\n staff.email=form.email.data\n staff.location=form.location.data\n db.session.commit()\n flash(f'Employee Updated', category='Success')\n elif request.method == 'GET':\n form.first_name.data=staff.first_name.capitalize()\n form.last_name.data=staff.last_name.capitalize()\n form.email.data=staff.email\n form.role.choices=[staff.role]\n form.location.data=staff.location\n return render_template('update_employee.html', title=\"Update Employee\", form=form)",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)",
"def employee():\n return Response(render_template('employee/employee.html'))",
"def edit_register(id):\n add_employee = False\n\n employee = Employee.query.get_or_404(id) #from table\n print('----update 1----')\n form = UpdateForm(obj=employee) #if not 404\n print('----update 2----')\n if form.validate_on_submit():\n employee.email = email=form.email.data\n employee.username=form.username.data\n employee.glad_id=form.glad_id.data\n employee.tel_no=form.tel_no.data\n employee.role_id=form.role_id.data\n employee.password=form.password.data\n\n # UPDATE employee to the database\n print('----update----',employee.role_id)\n db.session.commit()\n flash('You have successfully updated! ')\n\n # # redirect to the login page\n # return redirect(url_for('auth.login'))\n\n # load registration template\n return render_template('auth/register.html', form=form, title='Update')",
"def edit_view(request, title, modelform, instance=None, **kwargs):\n instance_form = modelform(request.POST or None, instance=instance)\n if instance_form.is_valid():\n instance = instance_form.save()\n messages.success(request, _(\"%s was edited.\") % instance)\n return redirect(instance.get_absolute_url())\n return form(\n {**kwargs, \"form\": instance_form, \"action_name\": _(\"Edit\"), \"title\": title},\n \"deployments/form.html\",\n request,\n )",
"def profile_detail(request, employee_id):\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n employee = Employee.objects.get(pk=int(employee_id))\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"user\"] = employee.user.first_name + \" \" + employee.user.last_name\n data[\"id\"] = str(employee.user.pk)\n data[\"title\"] = employee.title\n data[\"email\"] = employee.user.email\n data[\"phone\"] = employee.phone\n company_dict = {}\n company_dict[\"name\"] = employee.company.name\n company_dict[\"id\"] = str(employee.company.pk)\n\n data[\"company\"] = company_dict\n employee_username = \"\"\n emp = Employee.objects.filter(manager=employee.manager).all()\n for obj in emp:\n employee_username = obj.manager.user.username if obj.manager else \"\"\n employee_first = obj.manager.user.first_name if obj.manager else \"\"\n employee_last = obj.manager.user.last_name if obj.manager else \"\"\n manager_dict = {}\n manager_dict[\"name\"] = employee_username\n manager_dict[\"id\"] = employee_id\n manager_dict[\"first_last_name\"] = employee_first + \" \" + employee_last\n data[\"manager\"] = manager_dict\n data[\"date_of_birth\"] = employee.date_of_birth\n data[\"status_questions\"] = employee.status_questions\n data[\"notes\"] = employee.notes\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n data[\"potenciale\"] = employee.potenciale\n data[\"date_start\"] = employee.created_at\n data[\"is_manager\"] = employee.is_manager\n data[\"date_finish\"] = \"\"\n data['photo'] = employee.photo.url if employee.photo else ''\n\n return JsonResponse(status=200, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )",
"def view_edit(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n view: Optional[View] = None,\n) -> JsonResponse:\n # Form to read/process data\n form = ViewAddForm(request.POST or None, instance=view, workflow=workflow)\n\n return save_view_form(\n request,\n form,\n 'table/includes/partial_view_edit.html')",
"def office_view(request, slug, id):\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n office_reference = get_object_or_404(Office, id=id,company=company)\n\n return render_to_response('office_form.html', \n {'details': office_reference,'info':office_reference},\n context_instance=RequestContext(request))",
"def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)",
"def team_edit(team_id):\n if request.method == 'GET':\n team = Team.query.filter_by(team_id=team_id).one()\n return render_template('edit_team.html', team=team)",
"def edit(request, company_id=None):\n if company_id:\n company = get_object_or_404(Company, id=company_id)\n if request.POST and company.owner == request.user:\n form = CompanyForm(request.POST, instance=company)\n if form.is_valid():\n form.save()\n return HttpResponseRedirect('/companies')\n if company.owner != request.user:\n return HttpResponseForbidden()\n form = CompanyForm(instance=company)\n context = dict(form=form)\n return render(request, 'companies/edit.html', context)\n else:\n companies = Company.objects.filter(owner=request.user)\n context = dict(companies=companies)\n return render(request, 'companies/companies_by_user.html', context)",
"def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))",
"def edit_employee_locations(employee_id):\n\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n \n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employee = Employee.query.get_or_404(employee_id)\n \n form = Add_Loc_Form(obj = employee)\n \n form.location.choices = db.session.query(Location.id, Location.site_name).all()\n \n \n if form.validate_on_submit():\n \n location = Location.query.get(form.location.data) \n employee.locations.append(location)\n db.session.add(employee)\n \n db.session.commit()\n\n \n\n flash(f\"{employee.first_name} {employee.last_name} has been saved\", \"success\")\n return redirect(\"/administrator\")\n else:\n \n return render_template(\"/admin/employee_cert.html\", employee = employee, form = form)",
"def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )",
"def show_edit_form(user_id):\n\n user = User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)",
"def edit(request, pk):\n template_var = base_template_vals(request)\n user = template_var[\"u\"]\n event = Event.objects.get(id=pk)\n template_var[\"e\"] = event\n if user.is_superuser or user.is_moderator:\n if request.method == 'POST':\n title = request.POST['title']\n refer = request.POST['refer']\n date = request.POST['date']\n time = request.POST['time']\n loc = request.POST['loc']\n body = request.POST['body']\n \n # Deal with time field\n try:\n event_datetime = date + ' ' + time\n print event_datetime\n event_datetime = datetime.strptime(event_datetime, '%m/%d/%Y %H:%M')\n except ValueError:\n print \"Error when processing time field\"\n \n # Deal with tags checkbox list\n tags = request.POST.getlist(\"tags\") \n if len(tags) == 0:\n event.tags.add(\"untagged\")\n else:\n taglist = list(tags)\n for t in taglist:\n event.tags.add(t)\n \n event.title = title\n event.refer = refer\n event.event_time = event_datetime\n event.location = loc\n event.body = body\n event.save() \n return single(request, pk)\n return render_to_response(\"event/event_edit.html\", template_var,\n context_instance=RequestContext(request))\n else :\n return redirect('index')",
"def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)",
"def edit(self, id, *args, **kw):\n atras = \"/rolesplantilla/\"\n if (not kw['contexto']):\n redirect('../')\n elif (kw['contexto'] == \"proyecto\"):\n selector = SelectorPermisosPlantillaProy\n elif (kw['contexto'] == \"fase\"):\n selector = SelectorPermisosPlantillaFase\n elif (kw['contexto'] == \"ti\"):\n kw[\"contexto\"] = u\"Tipo de Ítem\"\n selector = SelectorPermisosPlantillaTi\n \n self.edit_form = RolPlantillaEditForm(DBS=DBSession, selector=selector) \n tmpl_context.widget = self.edit_form\n rol_plantilla_edit_form = self.edit_form\n \n \n page=u\"Editar Rol Plantilla de {contexto}\".format(contexto=kw['contexto'])\n \n value = self.edit_filler.get_value(values={'id_rol': int(id)})\n \n #agregado\n if value[\"tipo\"].find(\"Plantilla\") < 0:\n page=u\"Editar Rol de {contexto}\".format(contexto=kw['contexto'])\n atras = \"/roles/\"\n \n return dict(value=value, page=page, atras=atras)",
"def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)"
] | [
"0.7485564",
"0.6978975",
"0.6902276",
"0.6840151",
"0.6783405",
"0.6756372",
"0.66953164",
"0.66938245",
"0.66192317",
"0.65741867",
"0.65598595",
"0.65310377",
"0.6502672",
"0.6400611",
"0.6391832",
"0.6368707",
"0.63276947",
"0.6300049",
"0.6295493",
"0.62654054",
"0.6208546",
"0.61726767",
"0.612466",
"0.6097169",
"0.60935307",
"0.60688716",
"0.6059109",
"0.60383636",
"0.60363305",
"0.60226"
] | 0.7715588 | 0 |
View for list of actions of (current) employee | def action_list(request, employee_id=None):
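    # If an employee_id is supplied, show that employee's actions (subject to a company check);
    # otherwise fall back to the requesting user's own employee record.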
if employee_id:
employee = Employee.objects.get(pk=employee_id)
current_employee = Employee.objects.get(user__pk=request.user.pk)
if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:
raise PermissionDenied()
else:
employee = request.user.employee_user.first()
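    # Collect every action attached to the resolved employee.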
actions = employee.action_set.all()
return TemplateResponse(
request,
'mus/action_list.html',
dict(
actions=actions,
employee=employee
)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_overview_of_all_employees(self):\n\n print(\"OVERVIEW OF EMPLOYEES\\n\")\n\n employees_ob_list = self.llapi.get_employee_overview()\n \n for employee_ob in employees_ob_list:\n print(employee_ob.print_info_in_line(\"*\"))\n \n print(f\"\\nNAN AIR has {len(employees_ob_list)} employees\")\n\n print(\"\\nB Back\\n\")\n\n action_str = self.choose_action([\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"b\"])\n\n if action_str == \"b\":\n return",
"def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")",
"def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)",
"def show_employee_menu(self):\n \n action_str = \"\"\n\n while True:\n print(self.LENGTH_STAR * \"*\")\n print(\"EMPLOYEES MENU\\n\")\n print(\"1 Print overview of all employees\")\n print(\"2 Pilots\")\n print(\"3 Cabin Crew\")\n print(\"B Back\\n\")\n\n action_str = self.choose_action([\"1\", \"2\" ,\"3\" ,\"b\"])\n while action_str == False:\n action_str = self.choose_action([\"1\", \"2\", \"3\", \"b\"])\n\n if action_str == \"1\":\n self.show_overview_of_all_employees()\n\n elif action_str == \"2\":\n self.show_pilot_or_crew_menu(self.PILOT)\n\n elif action_str == \"3\":\n self.show_pilot_or_crew_menu(self.CREW)\n\n elif action_str == \"b\":\n return",
"def show_all_employees():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n \n \n ## right now this is ALL users... \n \n return render_template(\"employee_display.html\", employees = employees)",
"def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)",
"def employee_detail(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n employee = Employee.objects.get(pk=int(employee_id))\n if not current_employee.isEnsoUser() and current_employee.company.pk != employee.company.pk:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not current_employee.pk == int(employee_id):\n if not current_employee.is_manager or not current_employee.company.pk == employee.company.pk:\n if not current_employee.isCompanySuperUserOrHigher():\n return HttpResponse('unauthorized', status=401)\n\n user_files = get_files_for_employee(employee_id)\n\n if request.method == 'POST':\n\n upload_form = UploadFileToEmployeyForm(request.POST, request.FILES)\n form = EmployeeNoteForm(request.POST, instance=employee)\n\n if 'upload' in request.POST:\n if upload_form.is_valid():\n upload_form.handle_upload(employee_id, request.FILES['file'])\n\n return HttpResponseRedirect('/employee/show/{}?upload_status=ok#file-list'.format(employee_id))\n\n else:\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/employee/show/%d' % form.instance.pk)\n\n else:\n form = EmployeeNoteForm(instance=employee)\n upload_form = UploadFileToEmployeyForm()\n data = {}\n data[\"first_name\"] = employee.user.first_name\n data[\"last_name\"] = employee.user.last_name\n data[\"email\"] = employee.user.email\n data[\"is_manager\"] = employee.is_manager\n data[\"language_code\"] = employee.language_code\n employee_role = EmployeeRole.objects.filter(employee=employee).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n data[\"roles\"] = name_role_list\n return JsonResponse(status=201, data=data)\n # return TemplateResponse(\n # request,\n # 'mus/detail.html',\n # {\n # 'actions': actions,\n # 'employee': employee,\n # # 'development_plans': development_plans,\n # 'form': form,\n # 'upload_form': upload_form,\n # 'user_files': user_files\n # }\n # )",
"def employee():\n return Response(render_template('employee/employee.html'))",
"def get_actions(self, request):\n return super(OrganizationAdmin, self).get_actions(request)",
"def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)",
"def __actions__(self, obj):\n primary_fields = self.__provider__.get_primary_fields(self.__entity__)\n pklist = '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n #if has_permission('manage'):############\n \n historial = DBSession.query(Item.nrohistorial).filter_by(id=pklist).first()\n idlineabase = DBSession.query(Item.idLineaBase).filter_by(nrohistorial=historial, ultimaversion=1).first()\n lineabase = DBSession.query(LineaBase).filter_by(id=idlineabase).first()\n \n value = '<div></div>'\n \n if lineabase != None:\n if str(lineabase.estado).__eq__('abierta'):\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n else:\n value = '<div><a class=\"loginlogout\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">Revertir</a></div>'\n \n return value",
"def current_employee(self, request: Request) -> Response:\n serializer = self.get_serializer_class()\n serializer = serializer(request.user, context={'request': request})\n return Response(serializer.data)",
"def action_detail(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=int(action_id))\n # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n if not employee.hasAccessTo(action.employee):\n raise PermissionDenied()\n\n if request.method == 'POST':\n form = ActionCommentForm(request.POST)\n if form.is_valid():\n form.save(request.user, action)\n return HttpResponseRedirect('/action/%s' % action_id)\n else:\n form = ActionCommentForm()\n return TemplateResponse(\n request,\n 'mus/action_detail.html',\n dict(\n action=action,\n form=form\n )\n )",
"def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)",
"def show_activities(self): \n database = Database('data/database.db')\n activities = database.read_activities()\n view = Template(filename=\"view/template.html\", lookup=lookup)\n \n \n return view.render(\n rows = [[item.number, item.name] for item in activities],\n pageTitle = \"Activités\",\n tableTitle = \"Liste de toutes les activités\",\n ths = [\"Numéro\", \"Nom\"]\n )",
"def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})",
"def actions(self):\n raise NotImplementedError",
"def get(self):\n resultado = EmployeeModel.query.all()\n return resultado",
"def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)",
"def show_all_information():\n if not g.user:\n flash(\"Please login to access\", \"danger\")\n return redirect(\"/\")\n if g.user.is_admin == False:\n flash (\"Unauthorized\", \"danger\")\n return redirect(\"/login\")\n\n employees = Employee.query.all()\n all_certs = employee_certification.query.all()\n \n return render_template(\"admin.html\", employees = employees, all_certs = all_certs)",
"def enterprise_edit(request):\r\n action = tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n career_id = tool.get_param_by_request(request.GET, 'careerId', 0, int)\r\n\r\n enterprise = APIResult()\r\n c = None\r\n if action == \"add\":\r\n c = {\"career_id\": career_id, \"action\": action}\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and (not career_id):\r\n _id = tool.get_param_by_request(request.GET, 'enterpriseId', 0, int)\r\n enterprise = api_enterprise.get_career_page_enterprise_by_id(_id)\r\n c = {\"enterprises\": enterprise.result()[0], \"action\": action}\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_edit.html\", c,\r\n context_instance=RequestContext(request))\r\n\r\n if action == \"edit\" and career_id:\r\n enterprise = api_enterprise.list_career_page_enterprise_by_career_id(career_id)\r\n c = {\"enterprises\": enterprise.result(), \"action\": action}\r\n\r\n if enterprise.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n else:\r\n return render_to_response(\"mz_course/careerIntroduce/careerIntroduce_enterprise_list.html\", c,\r\n context_instance=RequestContext(request))",
"def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)",
"def show_timeline(\n request: HttpRequest,\n pk: Optional[int] = None,\n workflow: Optional[Workflow] = None,\n) -> HttpResponse:\n action = None\n if pk:\n action = workflow.actions.filter(pk=pk).first()\n\n if not action:\n # The action is not part of the selected workflow\n return redirect('home')\n logs = workflow.logs.filter(payload__action_id=action.id)\n else:\n logs = workflow.logs\n\n event_names = [\n Log.SCHEDULE_EMAIL_EXECUTE,\n Log.DOWNLOAD_ZIP_ACTION,\n Log.SCHEDULE_JSON_EXECUTE,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SCHEDULE_EMAIL_EDIT,\n Log.SCHEDULE_JSON_EDIT,\n Log.SCHEDULE_CANVAS_EMAIL_EXECUTE,\n Log.SURVEY_INPUT,\n ]\n\n # Filter the logs to display and transform into values (process the json\n # and the long value for the log name\n logs = [\n {'id': log.id,\n 'name': log.get_name_display(),\n 'modified': log.modified,\n 'payload': json.dumps(log.payload, indent=2),\n 'action_name': log.payload['action'],\n 'action_id': log.payload['action_id']}\n for log in logs.filter(name__in=event_names)\n ]\n\n return render(\n request,\n 'action/timeline.html',\n {'event_list': logs, 'action': action})",
"def list(self, request):\n\n viewset_list = [\n 'User\\'s action (list,create,retrieve ,update , partial_update)',\n 'Automatically maps to the urls using Routers.',\n 'Provides more functionality with less code.',\n ]\n\n return Response({'message':'Hello From ViewSet' , 'viewset':viewset_list})",
"def actors_listing(request,option=None):\n\n if option == \"csv\":\n return generate_actor_csv(request)\n return generate_actor_jtable(request, option)",
"def get_action(self, context):\n pass",
"def committee_list(request):\n committees = Committee.objects.all().order_by(\"name\")\n\n context = {\"committees\": committees}\n template = \"jurycore/committee_list.html\"\n return render(request, template, context)",
"def actions():\n pass",
"async def audit_actions(self, ctx: Context) -> None:\n\n if ctx.invoked_subcommand is None:\n await ctx.send_help('auditaction')",
"def actions(self):\n return self._action_list"
] | [
"0.6613559",
"0.63822025",
"0.6160403",
"0.61327785",
"0.61140096",
"0.5988209",
"0.5908296",
"0.5907843",
"0.5899233",
"0.5892363",
"0.5886298",
"0.58746743",
"0.58155996",
"0.57719344",
"0.5768285",
"0.5741835",
"0.5715916",
"0.57119167",
"0.5709055",
"0.5664173",
"0.5616768",
"0.5592838",
"0.55697787",
"0.55644953",
"0.5532066",
"0.55105495",
"0.5503542",
"0.5502318",
"0.5494629",
"0.5478764"
] | 0.8160547 | 0 |
View for detail of action | def action_detail(request, action_id):
    employee = request.user.employee_user.first()
    action = Action.objects.get(pk=int(action_id))
    # if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:
    if not employee.hasAccessTo(action.employee):
        raise PermissionDenied()
    if request.method == 'POST':
        form = ActionCommentForm(request.POST)
        if form.is_valid():
            form.save(request.user, action)
            return HttpResponseRedirect('/action/%s' % action_id)
    else:
        form = ActionCommentForm()
    return TemplateResponse(
        request,
        'mus/action_detail.html',
        dict(
            action=action,
            form=form
        )
    ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __str__(self):\n return _action_args_dict[self.action].name",
"def action(self):\n pass",
"def action(self):\n pass",
"def view(self):",
"def get_action(self, context):\n pass",
"def show(self, *args, **kwargs) -> None:\n pass",
"def show(self, *args, **kwargs) -> None:\n pass",
"def show(self, *args, **kwargs) -> None:\n pass",
"def __str__(self):\n return str(self.get_action_display())",
"def details_view(self):\n return_url = get_redirect_target() or self.get_url('.index_view')\n\n if not self.can_view_details:\n return redirect(return_url)\n\n id = get_mdict_item_or_list(request.args, 'id')\n if id is None:\n return redirect(return_url)\n\n model = self.get_one(id)\n\n if model is None:\n flash(gettext('Record does not exist.'), 'error')\n\n if self.details_modal and request.args.get('modal'):\n template = self.details_modal_template\n else:\n template = self.details_template\n\n relationship_views = []\n for relationship in self.model_relationship_views:\n relationship_view = self.model_relationship_views[relationship]\n bp = relationship_view.blueprint\n endpoint = '{}.ajax_config'.format(relationship_view.blueprint.name)\n data = {\n 'field': relationship,\n 'title': relationship_view.title,\n 'config_url': self.get_url(endpoint, model_id=id)\n }\n relationship_views.append(data)\n\n return self.render(\n template,\n model=model,\n details_columns=self._details_columns,\n get_value=self.get_detail_value,\n relationship_views=relationship_views,\n return_url=return_url\n )",
"def print_details(self):\n self.view.print_details()",
"def detail(self, req):\n return self.index(req)",
"def obtain_action(self):\r\n\t\treturn",
"def action(action_id):\n action = Action.query.filter_by(\n id=action_id, username=current_user.username\n ).first_or_404()\n return jsonify(\n dict(\n action_id=action.id,\n action_name=action.name,\n action_type=action.type.value,\n details=action.details,\n )\n )",
"def action(self) -> str:\n return pulumi.get(self, \"action\")",
"def _action(self):\n pass",
"def detail_view(self, request, pk):\n instance = self.get_object()\n if self.revision_wanted is not None:\n instance = get_object_or_404(\n instance.revisions, id=self.revision_wanted).as_page_object()\n elif self.is_preview:\n instance = instance.get_latest_revision_as_page()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)",
"def actionURL(self):\n raise NotImplementedError()",
"def action_type(self):",
"def show(self):\n\n pass",
"def show(self) -> None:",
"def view_item(request, item_pk):\n return HttpResponse('This is where we view item ' + item_pk)",
"def action(self):\n return self._get_field(\"action\")",
"def get_action(self):\n raise NotImplementedError",
"def briefing_action(self, query):\n raise NotImplementedError()\n pass",
"def getAction(self, state):\n util.raiseNotDefined()",
"def getAction(self, state):\n util.raiseNotDefined()",
"def action_edit(request, action_id):\n employee = request.user.employee_user.first()\n action = Action.objects.get(pk=action_id)\n if not employee.isEnsoUser() and employee.company.pk != action.employee.company.pk:\n raise PermissionDenied()\n # if request.method == 'POST':\n form = ActionForm(request.POST, instance=action)\n if form.is_valid():\n form.save(request.user, employee)\n return HttpResponseRedirect('/action/%d' % form.instance.pk)\n # else:\n # form = ActionForm(instance=action)\n # return TemplateResponse(\n # request,\n # 'mus/action_edit.html',\n # dict(\n # form=form,\n # edit=True\n # )\n # )\n\n # return JsonResponse(status=200, data={\"data\": form.instance.title, \"edit\": True})",
"def action(self):\n return self._action",
"def action(self):\n return self._action"
] | [
"0.67003417",
"0.6600345",
"0.6600345",
"0.6525399",
"0.6474398",
"0.6430569",
"0.6430569",
"0.6430569",
"0.6398873",
"0.6335036",
"0.62369657",
"0.62285584",
"0.6216931",
"0.6206771",
"0.61830616",
"0.61691725",
"0.6162804",
"0.6153047",
"0.61520946",
"0.60394746",
"0.60298425",
"0.6023776",
"0.60130507",
"0.59975535",
"0.59950846",
"0.5987625",
"0.5987625",
"0.5985095",
"0.5970366",
"0.5970366"
] | 0.73323816 | 0 |
Create LeaderModel and send it as a PDF to the browser | def get_leader_model_pdf(currentEmpl, employees):
    lm = LeaderModel()
    employee_actions = {}
    legend = []
    colors = {}
    errors = {'noactions': []}
    # numbered_actions = {}
    for empl in employees:
        if not currentEmpl.hasAccessTo(empl):
            raise PermissionDenied()
        actions = empl.action_set.all()
        if not len(actions):
            errors['noactions'].append(empl)
            continue
        lkey = empl.user.first_name + " " + empl.user.last_name
        legend.append(lkey)
        if not lkey in employee_actions:
            employee_actions[lkey] = {}
        for action in actions:
            if not action.difficulty or not action.type:
                errors['noactions'].append(empl)
                continue
            circle_number = lm.addCircle(action)
            latest_comment = action.getLatestComment()
            employee_actions[lkey][circle_number] = {
                'name': action.title,
                'type': action.type,
                'difficulty': action.getDifficultyText(),
                'comment': latest_comment
            }
            if lkey not in colors:
                color = lm.getEmployeeColors(empl.id)
                colors[lkey] = "rgb({}, {}, {})".format(color[0], color[1], color[2])
    if len(errors['noactions']):
        return errors
    lm_filename = path.join(settings.STATIC_ROOT, "leadermodel_{}.png".format(currentEmpl.id))
    lm.writeImage(lm_filename)
    #
    # Write PDF
    pdfFilename = path.join(settings.FILES_ROOT, "leadermodel_{}.pdf".format(currentEmpl.id))
    template = get_template('mus/leader_model_pdf.html')
    context = Context({
        'site_url': settings.SITE_URL,
        'lm_filename': lm_filename,
        'employee_actions': employee_actions,
        'colors': colors,
        'legend': legend
    })
    html = template.render(context)
    # html = html.replace('<li>','<li><img class="square" src="http://test.nxtlvl.dk/static/img/square.png" />')
    result = open(pdfFilename, 'wb')
    pisa.pisaDocument(StringIO.StringIO(
        html.encode("UTF-8")), dest=result)
    result.close()
    wrapper = FileWrapper(file(pdfFilename))
    response = HttpResponse(wrapper, content_type='application/pdf')
    response['Content-Disposition'] = 'attachment;filename=ledermodel.pdf'
    response['Content-Length'] = os.path.getsize(pdfFilename)
    return response
    # return HttpResponseRedirect('/employee/all/%d' % int(company_id)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_leader_model(request, company_id):\n\n errors = {'noactions': []}\n company = Company.objects.get(pk=company_id)\n currentEmpl = Employee.objects.get(user__pk=request.user.pk)\n \"\"\":type : Employee \"\"\"\n\n if not currentEmpl.isEnsoUser() and currentEmpl.company.pk != company.pk:\n raise PermissionDenied()\n\n if currentEmpl.isCompanySuperUserOrHigher():\n employeeQS = Employee.objects.filter(\n company__pk=company_id\n )\n else:\n employeeQS = Employee.objects.filter(\n Q(manager=currentEmpl),\n company__pk=company_id\n )\n\n form = MultiLeaderModelForm(request.POST or None)\n form.fields['employees'].queryset = employeeQS\n\n if request.method == 'POST' and form.is_valid():\n\n employees = form.cleaned_data['employees']\n \"\"\":type : list[Employee] \"\"\"\n\n pdf_response = get_leader_model_pdf(currentEmpl, employees)\n\n if isinstance(pdf_response, HttpResponse):\n return pdf_response\n else:\n errors = pdf_response\n\n print(errors)\n\n return TemplateResponse(\n request,\n 'mus/create_leader_model.html', {\n 'form': form,\n 'company': company,\n 'errors': errors\n }\n )",
"def pdfReceiver(request, model=''):\n\n\tinput_str = ''\n\tinput_str += parsePOST(request)\n\t# packet = io.StringIO() # write to memory\n\tpacket = io.BytesIO()\n\n\ttry:\n\t\tpisa.CreatePDF(input_str, dest=packet)\n\texcept ValueError as error:\n\t\t# triggered from the elusive invalid color value issue:\n\t\tlogging.warning(\"elusive invalid color value, defaulting html background-color to FFFFFF\")\n\t\tpisa.CreatePDF(input_str, dest=packet, default_css=\"body{background-color:#FFFFFF;}\")\n\n\n\tjid = MetabolizerCalc().gen_jid() # create timestamp\n\tresponse = HttpResponse(packet.getvalue(), content_type='application/pdf')\n\tresponse['Content-Disposition'] = 'attachment; filename=' + model + '_' + jid + '.pdf'\n\tpacket.close() # todo: figure out why this doesn't solve the 'caching problem'\n\treturn response",
"def create_pdf(request):\n\n contact_info = ContactDetails.objects.iterator()\n\n # Create a file-like buffer to receive PDF data.\n buffer = io.BytesIO()\n\n # Create the PDF object, using the buffer as its \"file.\"\n pdf_file = canvas.Canvas(buffer)\n\n # Draw things on the PDF. Here's where the PDF generation happens\n pdf_file.setTitle(\"Contact Infomation\")\n pdf_file.setFont(\"Helvetica-Bold\", 20, leading=None)\n pdf_file.setFillColorRGB(1,0,0)\n pdf_file.drawString( 60, 800, \"Stefanos Taramas Contact Information\")\n pdf_file.setFillColorRGB(0,0,0)\n pdf_file.setFont(\"Helvetica\", 15, leading=None)\n\n for index, item in enumerate(contact_info):\n line = str(index + 1) +\") \" + str(item.contact_name) + \": \" + str(item.contact_info)\n column = 50\n row = 750 - 15*index\n pdf_file.drawString(column, row, line)\n\n # Close the PDF object cleanly\n pdf_file.showPage()\n pdf_file.save()\n\n # FileResponse sets the Content-Disposition header so that browsers\n # present the option to save the file.\n buffer.seek(0)\n\n return FileResponse(buffer, as_attachment=True, filename='StefanosTaramasContactInfo.pdf')",
"def bundle(handler, model):\n\n notebook_filename = model['name']\n notebook_name = os.path.splitext(notebook_filename)[0]\n pdf_filename = '{}.pdf'.format(notebook_name)\n\n with io.BytesIO() as pdf_buffer:\n pdf_body = convert_notebook_to_pdf(model)\n pdf_buffer.write(pdf_body)\n\n handler.set_attachment_header(pdf_filename)\n handler.set_header('Content-Type', 'application/pdf')\n\n # Return the buffer value as the response\n handler.finish(pdf_buffer.getvalue())",
"def generate_pdf(request):\n reg_no = request.user.username\n user = get_object_or_404(User, username=reg_no)\n user_profile = user.get_profile()\n user_application = user_profile.application\n np = user_application.np\n \n response = HttpResponse(mimetype='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=JAM2012_Allottment.pdf'\n \n elements = []\n doc = SimpleDocTemplate(response)\n \n formatted_time = time.ctime()\n styles = getSampleStyleSheet()\n styles.add(ParagraphStyle(name='Justify', alignment=TA_JUSTIFY))\n \n ptext = '<font size=15>JAM 2012 - Admissions.</font>' \n elements.append(Paragraph(ptext, styles[\"Justify\"]))\n elements.append(Spacer(4, 20))\n \n ptext = '<font size=12>Registration Number: %s</font>' % reg_no \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n \n data = [] \n options = get_chosen_options(user) ##Put a check to show when the options chosen is empty\n \n if not(options):\n ptext = '<font size=12>No choices were selected.</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n doc.build(elements) \n return response \n \n ptext = '<font size=12>The choices selected by me are as follows: </font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 30))\n \n counter = 1\n for opt in options:\n data.append([counter, opt.opt_code, opt.opt_location, opt.opt_name])\n counter = counter + 1\n \n t = Table(data)\n t.setStyle(TableStyle([('GRID',(0,0),(3,len(options)),1,colors.black),\n ('TEXTCOLOR',(0,0),(0,-1),colors.green)]))\n \n elements.append(t) \n \n elements.append(Spacer(4, 30))\n \n ptext = '<font size=12>I hereby declare that the order of preference given by me for my eligible programmes is final. </font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 25))\n \n ptext = '<font size=12>Signature of the Candidate</font>' \n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(4, 20))\n \n ptext = '<font size=12>%s</font>' % formatted_time\n elements.append(Paragraph(ptext, styles[\"Normal\"]))\n elements.append(Spacer(1, 12))\n \n doc.build(elements)\n \n return response",
"def _create_pdf(self, survey, response):\n pdf_transformer = PDFTransformer(survey, response)\n self._pdf, self._page_count = pdf_transformer.render_pages()\n return self._pdf",
"def createpdf():\n with app.app_context():\n # Get form data\n if request.form:\n data = request.form\n else:\n return 'no form'\n msg = {}\n msg['name'] = data['name']\n msg['role'] = data['role']\n msg['unit'] = data['unit']\n msg['unitdetail'] = data['unitdetail']\n msg['phone'] = data['phone']\n msg['email'] = data['email']\n msg['employmentdate'] = data['employmentdate']\n filename = 'default.png'\n if 'img' in request.files:\n file = request.files['img']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename).replace(\"_\",\"\")\n portraitFilePath = os.path.join(app.config['IMAGE_UPLOADS'], filename)\n file.save(portraitFilePath)\n if 'presentation' in data:\n msg['presentation'] = data['presentation']\n if 'edu-title' in data:\n msg['edu'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('edu-title'), request.form.getlist('edu-time'))]\n msg['edu'].sort(key = itemgetter('title'))\n msg['edu'].sort(key = itemgetter('time'), reverse=True)\n if 'emp-title' in data:\n msg['emp'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('emp-title'), request.form.getlist('emp-time'))]\n msg['emp'].sort(key = itemgetter('title'))\n msg['emp'].sort(key = itemgetter('time'), reverse=True)\n if 'cou-title' in data:\n msg['cou'] = [{'title': i, 'time': j} for i, j in zip(request.form.getlist('cou-title'), request.form.getlist('cou-time'))]\n msg['cou'].sort(key = itemgetter('title'))\n msg['cou'].sort(key = itemgetter('time'), reverse=True)\n if 'ass-title' in data:\n msg['ass'] = [{'title': i, 'company': j, 'role': k, 'descr': l, 'time': m} for i,j,k,l,m in zip(request.form.getlist('ass-title'), request.form.getlist('ass-company'), request.form.getlist('ass-role'), request.form.getlist('ass-descr'), request.form.getlist('ass-time'))]\n msg['ass'].sort(key = itemgetter('title'))\n msg['ass'].sort(key = itemgetter('time'), reverse=True)\n\n cv = TEXTEMPLATE.render(msg = msg, portrait = 'img/' + filename)\n pdf = writeTex(cv, app.config[\"OUT_DIR\"], filename)\n deleteImgUpload(filename)\n return redirect(\"/getpdf/\" + pdf)",
"def toPDF(Infos):\n\n\n #returnPDF = PDFDocument(\"output\")\n #returnPDF.Infos.get(\"name\")\n returnPDF = PDF(\"Courier\", Infos.get(\"name\"))\n if Infos.get('contact'):\n returnPDF.contact(Infos.get(\"contact\"))\n if Infos.get('Current position'):\n returnPDF.currentposition(Infos.get(\"Current position\"))\n if Infos.get('Education'):\n returnPDF.currentposition(Infos.get(\"Education\"))\n if Infos.get('Langue'):\n returnPDF.currentposition(Infos.get(\"Langue\"))\n returnPDF.output(\"result.pdf\", 'F')",
"def create_bill_pdf(obj):\n data = {\n 'today': datetime.date.today(),\n 'amount': obj.price,\n 'customer_name': obj.company.company_name,\n 'order_id': obj.pk,\n }\n pdf = render_to_pdf('pdf/invoice.html', data)\n filename = obj.company.company_name + '_' + obj.promotion.campaign_name + '_' + \\\n datetime.datetime.now().strftime(\"%Y-%m-%d\") + '.pdf'\n obj.bill.save(filename, File(io.BytesIO(pdf.content)))",
"def make_pdf(self, net_id, request_id, request_date):\n with open(\"{0}/user_uploads/{1}/{2}/submission.json\".format(self.__APP_PATH__, net_id, request_id), mode=\"r\") as json_file:\n request_details = json.load(json_file)\n\n files_text = \"\"\n travel_text = None\n\n if request_details[\"request_type\"] == \"travel\":\n travel_text = \"\\n\\nTravel Details:\\n\" \\\n \"\\t\\t\\t\\tTravel from: {0} ({1})\\n\" \\\n \"\\t\\t\\t\\tTravel to: {2} ({3})\\n\" \\\n \"\\t\\t\\t\\tTravel Number: {4}\\n\" \\\n \"\\t\\t\\t\\tEvent Website: {5}\".format(request_details[\"travel_from\"],\n request_details[\"travel_from_date\"],\n request_details[\"travel_to\"],\n request_details[\"travel_to_date\"],\n request_details[\"travel_number\"],\n request_details.get(\"event_website\", \"N/A\"))\n for file in request_details[\"files\"]:\n amount_text = \"${0}\".format(file[\"dollar_amount\"]) if file[\"dollar_amount\"] > 0.0 else \"Auxiliary File\"\n files_text += \"\\t\\t\\t\\t{0} ({1})\\n\\t\\t\\t\\t\\t\\t\\t\\t\" \\\n \"{2}\\n\\t\\t\\t\\t\\t\\t\\t\\t{3}\\n\\n\".format(file[\"label\"], amount_text,\n file[\"name\"], file[\"description\"])\n\n if request_details[\"notes\"].strip():\n request_notes = \"\\nNotes:\\n{0}\".format(request_details[\"notes\"].strip())\n else:\n request_notes = \"\"\n\n pdf_title = \"({0}) {1:02d}/{2:02d}/{3:04d} - {4:02d}:{5:02d}:{6:02d}, Amount: ${7}\".format(\n request_details[\"request_date\"][\"weekday\"], request_details[\"request_date\"][\"month\"],\n request_details[\"request_date\"][\"day\"], request_details[\"request_date\"][\"year\"],\n request_details[\"request_date\"][\"hours\"], request_details[\"request_date\"][\"minutes\"],\n request_details[\"request_date\"][\"seconds\"], request_details[\"total_amount\"])\n\n if request_details[\"pay_to\"][\"id\"]:\n pay_to_details = \"{0} ({1}, {2})\".format(request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"id\"],\n request_details[\"pay_to\"][\"email\"])\n else:\n pay_to_details = \"{0} ({1})\".format(request_details[\"pay_to\"][\"name\"], request_details[\"pay_to\"][\"email\"])\n\n pdf_body = \"{0}{1}\\n\\nRequestee: \\n\\t\\t\\t\\tAccount:{2}\\n\\t\\t\\t\\tName: {3} {4} ({5})\\n\\t\\t\\t\\t\" \\\n \"Phone: {6}\\t|\\tNet ID: {7}\\t\\n\\nPay To:\\n\\t\\t\\t\\tName: {8}{9}\\n\\n\" \\\n \"Files:\\n{10}\".format(request_details[\"short_description\"], request_notes,\n request_details[\"account_number\"],\n request_details[\"requester\"][\"first_name\"],\n request_details[\"requester\"][\"last_name\"],\n request_details[\"requester\"][\"email\"],\n request_details[\"requester\"][\"phone_number\"],\n request_details[\"requester\"][\"net_id\"],\n pay_to_details,\n travel_text,\n files_text)\n try:\n logo_path = \"{0}/static/assets/main/uta_logo.png\".format(self.__APP_PATH__.split(\"/apps/\")[0])\n pdf = PDFMaker(**{\"title\": \"Reimbursement Request Report\"})\n\n pdf.set_margins(left=19.05, top=19.05, right=19.05)\n pdf.set_auto_page_break(auto=True, margin=19.05)\n pdf.set_author(\"MavApps - Reimbursement App\")\n pdf.print_page(pdf_title, pdf_body)\n pdf.image(logo_path, x=53, y=11, w=107, h=10, type=\"PNG\", link=\"https://uta.edu\")\n pdf.output(\"{0}/user_uploads/{1}/{2}/[{1}-{3}]_report.pdf\".format(self.__APP_PATH__, net_id, request_id, request_date), \"F\")\n except Exception as e:\n print(e)\n return False\n return True",
"def book(**kwargs):\n print(\"pdf created\")",
"def _produce_pdf_as_a_response(self, html):\n # Create a Django response object, and specify content_type as pdf\n response = HttpResponse(content_type='application/pdf')\n # Define that this is an attachment. \n response['Content-Disposition'] = 'attachment;'\n pisaStatus = pisa.CreatePDF(html, dest=response)\n \n return response",
"def generate_pdf(self):\n x = 100\n y = 100\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=\"A4\")\n p.drawString(x, y, \"TO DO\")\n p.showPage()\n p.save()\n pdf = buffer.getvalue()\n buffer.close()\n return pdf",
"def generate():\n # Create the list of article from our data\n generator = GenerateLDA()\n generator.generateLDA()\n return jsonify({\"code\": 200, \"message\" : \"LDA model successfully created.\"})",
"def generate_contract_de_fdf_pdf(user):\n if DEBUG: # pragma: no cover\n print \"===== this is generate_fdf_pdf\"\n from fdfgen import forge_fdf\n fields = [\n ('surname', user.surname),\n ('lastname', user.lastname),\n ('street', user.street),\n ('number', user.number),\n ('postcode', user.postcode),\n ('city', user.city),\n ('email', user.email),\n ('user_id', user.id),\n ('username', user.username),\n ('date_registered', user.date_registered),\n ('date_generated', datetime.now()),\n ]\n #generate fdf string\n fdf = forge_fdf(\"\", fields, [], [], [])\n # write to file\n my_fdf_filename = \"fdf\" + str(user.id) + \".fdf\"\n\n fdf_file = open(my_fdf_filename, \"w\")\n fdf_file.write(fdf)\n fdf_file.close()\n if DEBUG: # pragma: no cover\n print \"fdf file written.\"\n\n res = os.popen(\n 'pdftk pdftk/berechtigungsvertrag-2.2.pdf fill_form %s output'\n ' formoutput.pdf flatten' % my_fdf_filename).read()\n\n if DEBUG: # pragma: no cover\n print res\n print \"done: put data into form and finalized it\"\n\n # delete the fdf file\n res = os.popen('rm %s' % my_fdf_filename)\n if DEBUG: # pragma: no cover\n print res\n print \"combining with bank account form\"\n # combine\n res = os.popen(\n 'pdftk formoutput.pdf pdftk/bankaccount.pdf output output.pdf').read()\n if DEBUG: # pragma: no cover\n print res\n print \"combined personal form and bank form\"\n\n # delete the fdf file\n os.popen('rm formoutput.pdf').read()\n\n # return a pdf file\n from pyramid.response import Response\n response = Response(content_type='application/pdf')\n response.app_iter = open(\"output.pdf\", \"r\")\n return response",
"def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # 
fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()",
"def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)",
"def make_pdf(self):\n source = self.get_page_source()\n if not source:\n self.errors.append('no_source')\n if not self.errors:\n self.generate_pdf_file(source)",
"def generate_pdf(list,id):\n\n doc = SimpleDocTemplate(settings.STATIC_ROOT+\"/tests/\"+str(id)+\"/\"+str(id)+\".pdf\")\n\n Story = [Spacer(1,2*inch)]\n styles = stylesheet()\n global Title\n\n # Add 10 questions with boxes below\n for i in list:\n if not i[0] in \"skills-scan\" and not i[0] in \"csrfmiddlewaretoken\" and not i[0] in \"titre\" and not i[0] in \"custom\":\n tmp = int(i[0])+1\n bogustext = (str(tmp)+\". %s\" % i[1])\n p = Paragraph(bogustext, styles['default'])\n # Write the paragraph\n\n draw = Drawing()\n # rect(x1,y1,width,height)\n rec = Rect(0, 100, 450, 150)\n rec.fillColor = colors.white\n # draw the rect under each paragraph\n draw.add(rec)\n p.keepWithNext = True\n Story.append(p)\n Story.append(draw)\n Story.append(Spacer(1,-0.9 * inch))\n elif i[0] in \"titre\":\n Title = i[1]\n # build the document by inserting the whole story\n doc.build(Story, onFirstPage=myFirstPage, onLaterPages=myLaterPages)\n return str(id)+\".pdf\"",
"def create_pdf(submission):\n # Get questions from sections\n fields = {}\n for section in submission.questions:\n for form in section[\"forms\"]:\n for field in form[\"fields\"]:\n fs = field.get(\"fields\", [field])\n for f in fs:\n fields[f[\"name\"]] = f\n\n # Pull out image and answers\n images = []\n docs = []\n answers = []\n for answer in submission.answers:\n answer, name = answer.get(\"answer\", \"\"), answer.get(\"name\", \"\")\n field = fields[name]\n if field[\"type\"] == \"FILE\":\n image_ids = []\n doc_ids = []\n for file in answer:\n if \"image\" in file:\n image_ids.append(file[\"id\"])\n elif \"file\" in file:\n doc_ids.append(file[\"id\"])\n\n if image_ids:\n images += [\n image_upload.image\n for image_upload in ImageUpload.objects.filter(\n pk__in=image_ids\n ).all()\n ]\n if doc_ids:\n docs += [\n file_upload.file\n for file_upload in FileUpload.objects.filter(pk__in=doc_ids).all()\n ]\n else:\n answers.append(\n {\n \"name\": name.lower().replace(\"_\", \" \").capitalize(),\n \"prompt\": field.get(\"prompt\", \"\"),\n \"answers\": answer if type(answer) is list else [answer],\n }\n )\n\n context = {\n \"submission\": submission,\n \"answers\": answers,\n \"images\": images,\n \"docs\": docs,\n }\n pdf_html_str = render_to_string(\"client-intake.html\", context=context)\n pdf_bytes = weasyprint.HTML(string=pdf_html_str).write_pdf()\n return pdf_bytes",
"def downlaod():\r\n filename = str(uuid.uuid4()) + '.pdf'\r\n filename = os.path.join('./output' , filename)\r\n\r\n config = pdfkit.configuration(wkhtmltopdf = PRG_Path)\r\n options = {\r\n 'page-size': 'Letter'\r\n ,'margin-top': '0.75in'\r\n ,'margin-right': '0.75in'\r\n ,'margin-bottom': '0.75in'\r\n ,'margin-left': '0.75in'\r\n ,'no-outline': None\r\n ,'encoding':'UTF-8'\r\n ,'enable-local-file-access':None\r\n ,'quiet': ''\r\n # ,'javascript-delay':2000000\r\n }\r\n\r\n\r\n html = create_html_report()\r\n pdf = pdfkit.from_string(input=html, output_path=filename,configuration=config, options=options)\r\n pdfDownload = open(filename,'rb').read()\r\n\r\n response: Response = Response (\r\n pdfDownload\r\n ,mimetype=\"application/pdf\"\r\n ,headers={\r\n \"Content-disposition\": \"attachment; filename=\" + filename\r\n ,\"Content-type\": \"application/force-download\"\r\n }\r\n )\r\n return response",
"def produce_summary_pdf(model_name, img_path, hyperparams, model_arch, train_stats):\n # datetime object containing current date and time\n now = datetime.now()\n dt_string = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n pdf = FPDF()\n pdf.set_title(\"training_summary_{}_{}\".format(model_name.lower(), dt_string))\n pdf.add_page()\n pdf.set_xy(0, 10)\n pdf.set_font(\"Helvetica\", \"BI\", 16)\n pdf.set_text_color(25, 33, 78)\n pdf.set_draw_color(25, 33, 78)\n pdf.cell(20)\n pdf.cell(\n 200,\n 10,\n \"Model Training Summary: {}\".format(model_name.upper()),\n 0,\n 2,\n )\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(\n 200,\n 5,\n dt_string,\n 0,\n 2,\n )\n\n # Model Configuration Section\n pdf.cell(150, 10, \"Model Configuration:\", 0, 2)\n pdf.cell(30, 10, \"Parameter\", 1, 0)\n pdf.cell(140, 10, \"Value\", 1, 2)\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-30)\n attributes = [\n \"model_dir\",\n \"log_dir\",\n \"check_dir\",\n \"current_epoch\",\n \"overwrite\",\n \"exp_name\",\n ]\n for i, val in enumerate(hyperparams):\n if val not in attributes:\n pdf.cell(30, 10, \"%s\" % (val), 1, 0)\n pdf.cell(140, 10, \"%s\" % (hyperparams[val]), 1, 2)\n pdf.cell(-30)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Model Performance Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Performance Stats:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n\n loss = train_stats[\"test_loss\"]\n acc = train_stats[\"test_acc\"]\n\n pdf.set_text_color(255, 96, 80)\n pdf.cell(35, 6, \"Best Loss:\", 0, 0)\n pdf.cell(\n 45, 6, \"{:.3f} (Epoch {})\".format(min(loss), loss.index(min(loss)) + 1), 0, 0\n )\n pdf.cell(60, 6, \"Training Duration:\", 0, 0)\n pdf.cell(30, 6, \"{:.3f} (s)\".format(train_stats[\"total_dur\"]), 0, 2)\n pdf.cell(-140)\n pdf.cell(35, 6, f\"Best Accuracy:\", 0, 0)\n pdf.cell(45, 6, \"{:.3f} (Epoch {})\".format(max(acc), acc.index(max(acc)) + 1), 0, 0)\n pdf.cell(60, 6, \"Average Epoch Duration:\", 0, 0)\n pdf.cell(\n 30,\n 6,\n \"{:.3f} (s)\".format(train_stats[\"total_dur\"] / hyperparams[\"current_epoch\"]),\n 0,\n 2,\n )\n pdf.cell(-140)\n pdf.cell(90, 3, \"\", 0, 2)\n\n # Loss Curve Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 10, \"Model Loss Curve:\", 0, 2)\n pdf.image(img_path, x=None, y=None, w=160, h=0, type=\"PNG\", link=\"\")\n\n # Second Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20)\n\n # Model Arch Section\n pdf.cell(150, 20, \"Model Configuration:\", 0, 2)\n pdf.set_font(\"Helvetica\", \"\", 12)\n if model_arch is None:\n model_arch = \"No model configuration was provided\"\n pdf.set_text_color(255, 96, 80)\n pdf.multi_cell(180, 8, str(model_arch))\n\n # Third Page of Report\n pdf.add_page()\n pdf.set_xy(0, 0)\n pdf.cell(20, 20, \" \")\n\n # Training Loss Section\n pdf.set_text_color(25, 33, 78)\n pdf.set_font(\"Helvetica\", \"B\", 12)\n pdf.cell(150, 20, \"Detailed Loss Output:\", 0, 2)\n pdf.cell(40, 8, \"Epoch\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Loss\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Train Acc\", 1, 0, \"C\")\n pdf.cell(30, 8, \"Test Acc\", 1, 2, \"C\")\n pdf.set_text_color(255, 96, 80)\n pdf.set_font(\"Helvetica\", \"\", 12)\n pdf.cell(-130)\n for i in range(0, len(train_stats[\"train_loss\"])):\n pdf.cell(40, 8, \"{}\".format((i + 1)), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, 
\"{:.3f}\".format((train_stats[\"test_loss\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"train_acc\"][i])), 1, 0, \"C\")\n pdf.cell(30, 8, \"{:.3f}\".format((train_stats[\"test_acc\"][i])), 1, 2, \"C\")\n pdf.cell(-130)\n pdf.cell(90, 3, \"\", 0, 2)\n\n pdf.output(\n os.path.join(\n os.path.dirname(img_path),\n \"training_summary_{}.pdf\".format(model_name.lower()),\n ),\n \"F\",\n )",
"def save_pdf(self, response):\n\n # get metadata\n file_type = \"__comprovante_de_acesso__\"\n\n # options to save pdf\n file_id = str(uuid.uuid4())\n filename = \"{file_id}.pdf\".format(file_id=file_id)\n file_path = os.path.join(path, \"downloads\", self.scrape_id, filename)\n with open(file_path, 'wb') as f:\n f.write(response.body)\n\n # upload pdf to s3 and call the webhook\n self.upload_file(file_id)\n\n # update values in result\n self.result.update({file_type: {\"file_id\": file_id}})",
"def generate_recipt(investor_name, total_prices):\n \n pdf = fpdf.FPDF(format='letter') \n total = 0.0\n pdf.add_page() \n pdf.set_font(\"Arial\", size=12) \n pdf.cell(200, 10, txt='******************************************', ln=1, align=\"L\")\n pdf.cell(200,10, txt=' Recipt ',ln=2, align=\"L\")\n pdf.cell(200, 10, txt='******************************************', ln=3, align=\"L\")\n pdf.cell(200,10, txt=f'Date: {datetime.now().strftime(\"%B %d, %Y\")}', ln=4, align=\"L\")\n pdf.cell(200,10, txt=f'Investor Name: {investor_name.title()}', ln=5, align=\"L\")\n pdf.cell(200, 10, txt='******************************************', ln=6, align=\"L\")\n temp =6\n for symbol,individual_cost in total_prices.items():\n pdf.cell(200, 10, txt=f'{symbol} {individual_cost:.2f}' ,ln=temp+1, align=\"L\" )\n total = calculate_total_price(total_prices)\n \n pdf.cell(200,10, txt=f'Your Total excluding tax : {total:.2f}',ln= temp+1,align=\"L\")\n pdf.cell(200, 10, txt='******************************************', ln=temp+1, align=\"L\")\n try:\n os.makedirs(\"outputs\")\n except OSError as exc: \n if exc.errno != errno.EEXIST:\n raise\n try:\n pdf.output(\"outputs/recipt.pdf\")\n except Exception as e:\n print(f'generate_recipt encountered {e} exception')",
"def pdf_manager(self):\n\n s3ocr_root = self.s3ocr_etree() # get element s3xml\n\n # define font size\n titlefontsize = 18\n sectionfontsize = 15\n regularfontsize = 13\n hintfontsize = 10\n \n # etree labels\n ITEXT = \"label\"\n HINT = \"comment\"\n TYPE = \"type\"\n HASOPTIONS = \"has_options\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n\n #l10n\n l10n = self.l10n\n\n # get pdf title\n if self.pdftitle == None or self.pdftitle == \"\":\n try:\n pdftitle = self.manager.s3.crud_strings[\\\n self.tablename].subtitle_list.decode(\"utf-8\")\n except:\n pdftitle = self.resource.tablename\n else:\n pdftitle = self.pdftitle\n\n # prepare pdf\n form = Form()\n form.decorate()\n\n # set header\n form.canvas.setTitle(pdftitle) # set pdf meta title\n form.print_text([pdftitle,],\n fontsize=titlefontsize,\n style=\"center\") # set pdf header title\n\n form.print_text(\n [\n unicode(l10n.get(\"ocr_inst\").get(\"inst1\").decode(\"utf-8\")),\n unicode(l10n.get(\"ocr_inst\").get(\"inst2\").decode(\"utf-8\")),\n unicode(l10n.get(\"ocr_inst\").get(\"inst3\").decode(\"utf-8\"))\n ],\n fontsize=regularfontsize,\n gray=0)\n form.linespace(3)\n # printing the etree\n for eachresource in s3ocr_root:\n form.draw_line()\n form.print_text([\n eachresource.attrib.get(ITEXT,\n eachresource.attrib.get(\"name\"))\n ],\n fontsize=sectionfontsize)\n form.draw_line(nextline=1)\n form.linespace(12) # line spacing between each field\n for eachfield in eachresource.iterchildren():\n fieldlabel = eachfield.attrib.get(ITEXT)\n spacing = \" \" * 5\n fieldhint = self.__trim(eachfield.attrib.get(HINT))\n if fieldhint != \"\" and fieldhint != None:\n form.print_text([\"%s%s( %s )\" % \\\n (fieldlabel,\n spacing,\n fieldhint)],\n fontsize=regularfontsize)\n else:\n form.print_text([fieldlabel],\n fontsize=regularfontsize)\n\n if eachfield.attrib.get(\"readable\", \"False\") == \"True\" and \\\n eachfield.attrib.get(\"writable\", \"False\") == \"False\":\n # if it is a readonly field\n form.print_text(\n [eachfield.attrib.get(\"default\",\"No default Value\")],\n seek=10,\n )\n elif eachfield.attrib.get(HASOPTIONS) == \"True\":\n fieldtype = eachfield.attrib.get(TYPE)\n # if the field has to be shown with options\n if fieldtype == \"boolean\":\n form.nextline()\n form.resetx()\n bool_text = l10n.get(\"boolean\")\n form.print_text(\n [bool_text.get(\"yes\").decode(\"utf-8\")],\n continuetext=1,\n seek=3,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n form.print_text(\n [bool_text.get(\"no\").decode(\"utf-8\")],\n continuetext=1,\n seek=10,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n else:\n if fieldtype == \"multiselect\":\n option_hint = l10n.get(\"select\").get(\"multiselect\")\n else:\n option_hint = l10n.get(\"select\").get(\"singleselect\")\n form.print_text(\n [option_hint.decode(\"utf-8\")],\n fontsize=hintfontsize,\n gray=0.4,\n seek=3,\n )\n s3ocrselect = eachfield.getchildren()[0]\n form.nextline(regularfontsize)\n form.resetx() # move cursor to the front\n optionseek = 10\n # resting margin for options\n formmargin = form.marginsides\n form.marginsides = optionseek + formmargin\n for eachoption in s3ocrselect.iterchildren():\n form.print_text(\n [eachoption.text],\n continuetext=1,\n fontsize = regularfontsize,\n seek = 10,\n )\n # TODO: Store positions\n form.draw_circle(\n boxes=1,\n continuetext=1,\n gray=0.9,\n seek=10,\n fontsize=12,\n )\n # restoring orginal 
margin\n form.marginsides = formmargin\n \n else:\n # if it is a text field\n fieldtype = eachfield.attrib.get(TYPE)\n BOXES_TYPES = [\"string\", \"textbox\", \"integer\",\n \"double\", \"date\", \"datetime\",]\n if fieldtype in BOXES_TYPES:\n if fieldtype in [\"string\", \"textbox\"]:\n form.linespace(3)\n num_lines = int(eachfield.attrib.get(\"lines\",\n 1))\n for eachline in xrange(num_lines):\n # TODO: Store positions\n form.draw_check_boxes(\n completeline=1,\n gray=0.9,\n seek=3,\n )\n elif fieldtype in [\"integer\", \"double\"]:\n num_boxes = int(eachfield.attrib.get(\"boxes\",\n 9))\n form.linespace(3)\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = num_boxes,\n gray=0.9,\n seek=3,\n )\n elif fieldtype in [\"date\", \"datetime\"]:\n # print hint\n hinttext = \\\n l10n.get(\"datetime_hint\").get(fieldtype).decode(\"utf-8\")\n form.print_text(\n [hinttext],\n fontsize=hintfontsize,\n gray=0.4,\n seek=3,\n )\n form.linespace(8)\n datetime_continuetext = 0\n datetime_seek = 3\n if fieldtype == \"datetime\":\n datetime_continuetext = 1\n datetime_seek = 6\n #HH\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n seek = 3,\n )\n #MM\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n # DD\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext = datetime_continuetext,\n seek = datetime_seek,\n )\n # MM\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 2,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n # YYYY\n # TODO: Store positions\n form.draw_check_boxes(\n boxes = 4,\n gray=0.9,\n continuetext=1,\n seek = 4,\n )\n else:\n self.r.error(501, self.manager.PARSE_ERROR)\n print sys.stderr(\"%s :invalid field type: %s\" %\\\n (eachfield.attrib.get(\"name\"),\n fieldtype))\n return form.save()",
"def print_receipt(Student):\n\n pdf = FPDF('P', 'mm', 'A4')\n pdf.add_page('P')\n pdf.set_font('Times', 'B', 14)\n\n pdf.multi_cell(0, 5, 'Student Dues Payment Receipt')\n pdf.ln()\n pdf.multi_cell(0, 5, ('Student ID: %s' % Student.student_ID))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Name: %s' % Student.name))\n pdf.ln()\n pdf.multi_cell(0, 5, ('Mess Fees: %s' % Student.mess_charge))\n pdf.ln()\n\n if Student.room_type == \"S\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"single_room_rent\")[0]\n elif Student.room_type == \"D\":\n room_rent = db.get(\"hall\", Student.hall_ID, \"double_room_rent\")[0]\n\n pdf.multi_cell(0, 5, ('Room Rent: %s' % room_rent))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Amenities Charge: %s' % str(db.get(\"hall\", Student.hall_ID, \"amenities_charge\")[0])))\n pdf.ln()\n\n pdf.multi_cell(0, 5, ('Total Amount Paid: %s' % str(Student.total_dues)))\n pdf.ln()\n\n # Write generated output file to PDF\n pdf.output(('receipt_%s.pdf' % Student.hall_ID), 'F')",
"def generate_pdf_flask_response(pdf_data):\n html = HTML(string=pdf_data)\n\n return render_pdf(html)",
"def create_training_file(self):\n self.master.switch_frame(TrainingFileView)",
"def generate_report_pre_save(model, path, contents_manager, **kwargs):\n if model['type'] != 'notebook':\n return\n\n notebook = model['content']\n base, ext = os.path.splitext(path)\n output_filename = \"{}.pdf\".format(base)\n template_filename = 'custom.tplx'\n notebook_to_pdf = load_module('notebook_to_pdf', contents_manager.root_dir)\n # Make sure that we continue working even if the conversion fails\n try:\n notebook_to_pdf.convert_notebook_to_pdf(notebook, output_filename, template_filename)\n except Exception as e:\n contents_manager.log.error(e, exc_info=True)",
"def ReporteSolicitudesDeCambio(request, id_proyecto):\n from reportlab.lib.units import inch, cm\n from reportlab.lib.pagesizes import A4\n\n ancho = A4[0]\n alto = A4[1]\n\n proyecto = Proyecto.objects.get(id=id_proyecto)\n response = HttpResponse(content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=\"ReporteSolicitudesDeCambio.pdf\"'\n p = canvas.Canvas(response)\n p.setTitle('Reporte Solicitudes De Cambio del proyecto')\n p.translate(2.3 * cm, 0.3 * cm)\n p.setFont(\"Times-Italic\", 25)\n p.setFillColorRGB(0, 0, 0.5)\n p.drawString(30, alto - 40, \" Reporte Solicitudes De Cambio\")\n p.drawString(30, alto - 80, \" Proyecto : %s\" % proyecto)\n p.setFont(\"Courier-BoldOblique\", 14)\n p.saveState()\n solicitudes = SolicitudItem.objects.all()\n c = 100\n cont = 0\n for temp in solicitudes:\n cont = cont + 1\n if cont >= 1:\n p.setFillColorRGB(0, 0, 0.9)\n c = c + 40\n p.drawString(25, alto - c, \"Item , Linea Base , Usuario Solicitante , Estado\")\n p.setFont(\"Helvetica\", 12)\n c = c + 20\n for i in solicitudes:\n pid = i.item.tipoitem.fase.fkproyecto.id\n if (pid == proyecto.id):\n lb = i.item.lineabase\n if i.completo:\n if i.votossi > i.votosno:\n est = 'APROBADA'\n else:\n est = 'RECHAZADA'\n else:\n est = 'PENDIENTE'\n p.setFillColorRGB(0, 0, 0)\n p.drawString(25, alto - c, \"%s , %s , %s , %s\" % (i.item.nombre, lb, i.solicitante, est))\n c = c + 20\n p.showPage()\n p.save()\n return response"
] | [
"0.67373437",
"0.63020295",
"0.6226293",
"0.6087851",
"0.5910283",
"0.58224714",
"0.57705843",
"0.5737507",
"0.5713494",
"0.56827766",
"0.5535205",
"0.55348325",
"0.5490096",
"0.54388666",
"0.5369891",
"0.53277147",
"0.52944934",
"0.52798676",
"0.5266541",
"0.5252111",
"0.52401656",
"0.5238857",
"0.5215014",
"0.5207984",
"0.5201335",
"0.51799184",
"0.5165266",
"0.5165167",
"0.5160144",
"0.5129794"
] | 0.71051025 | 0 |
View for employee development plan details | def development_plan_details(request, development_plan_id): #, employee_id ):
    # employee = Employee.objects.get(user__pk=request.user.pk)
    # employee = Employee.objects.filter(pk=int(employee_id)).first()
    development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))
    current_employee = Employee.objects.filter(user__pk=request.user.pk).first()
    all_employees = development_plan.employee_relation.all()
    try:
        development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))
        data={}
        development_plan_object_list=[]
        dev_plan={}
        dev_plan["id"] = development_plan.id
        dev_plan["deleted"] = development_plan.deleted
        if development_plan.type:
            dev_plan["type"] = development_plan.type.name
        # dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\
        # .finished_at
        dev_plan["created_at"] = development_plan.created_at
        dev_plan["created_by"] = development_plan.created_by.username
        development_plan_object_list.append({"dev_plan_details":dev_plan})
        # manager_relation
        manager_data={}
        manager_data["manager_username"] = development_plan.manager_relation.user.username
        manager_data["manager_first_name"] = development_plan.manager_relation.user.first_name
        manager_data["manager_last_name"] = development_plan.manager_relation.user.last_name
        development_plan_object_list.append({"manager_data":manager_data})
        # employee_relation
        employee_data={}
        all_employees = development_plan.employee_relation.all()
        if all_employees:
            emp_list=[]
            for emp in all_employees:
                emp_data={}
                emp_data["id"] = emp.user.id
                emp_data["username"] = emp.user.username
                emp_data["first_name"] = emp.user.first_name
                emp_data["last_name"] = emp.user.last_name
                emp_data["status_questions"] = emp.status_questions
                emp_data["dev_plan_finished_at"] = DevelopmentPlanToEmployeeRelation\
                    .objects.get(employee=emp,
                                 development_plan = development_plan)\
                    .finished_at
                employee_role = EmployeeRole.objects.filter(employee=emp).all()
                name_role_list = []
                for obj in employee_role:
                    name_role_list.append(obj.role.name)
                emp_data["roles"] = name_role_list
                emp_list.append(emp_data)
            employee_data={"all_employees":emp_list}
        else:
            return JsonResponse(data={"details":"Any employee has Development Plan with id={}"
                                .format(development_plan.id)}, status=404)
        development_plan_object_list.append({"employee_data":employee_data})
        # competence_parts
        all_competence_parts = development_plan.competence_parts.all()
        competence_list = []
        questions_list = []
        sliders_list = []
        if all_competence_parts:
            for comp_part in all_competence_parts:
                comp_part_data={}
                competence_d={"competence_parts": []}
                comp_part_data["id"] = comp_part.id
                comp_part_data["title"] = comp_part.title
                comp_part_data["description"] = comp_part.description
                comp_part_data["competence_status"] = comp_part.competence_status
                all_questions = comp_part.question_set.all()
                if all_questions:
                    for question in all_questions:
                        question_data = {}
                        question_data["question_id"] = question.id
                        question_data["title"] = question.title
                        question_data["competence_part"] = question.competence_part.id
                        answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee
                        if answer:
                            question_data["answer_id"] = answer.id
                            question_data["answer"] = answer.title
                        questions_list.append(question_data)
                comp_part_data["questions"] = questions_list
                all_sliders = comp_part.slider_set.all()
                if all_sliders:
                    for slider in all_sliders:
                        slider_data = {}
                        slider_data["slider_id"] = slider.id
                        slider_data["scale"] = slider.scale
                        slider_data["competence_part"] = slider.competence_part.id
                        answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee
                        if slider:
                            slider_data["answer_id"] = answer.id
                            slider_data["answer"] = answer.slider.scale
                        sliders_list.append(slider_data)
                comp_part_data["sliders"] = sliders_list
                comp_part_data["created_at"] = comp_part.created_at
                comp_part_data["created_by"] = comp_part.created_by.username
                comp_part_data["updated_at"] = comp_part.updated_at
                comp_part_data["updated_by"] = comp_part.updated_by.username
                competence_keys_list = ['id', 'title', 'description',
                                        'language_code', 'status']
                if not competence_list:
                    get_competence_data(competence_keys_list, comp_part.competence, competence_d,
                                        comp_part_data, competence_list)
                else:
                    competence_found = False
                    for competence_dict in competence_list:
                        if competence_dict['id'] == comp_part.competence.id:
                            competence_dict['competence_parts'].append(comp_part_data)
                            competence_found = True
                            break
                    if not competence_found:
                        get_competence_data(competence_keys_list, comp_part.competence, competence_d,
                                            comp_part_data, competence_list)
            development_plan_object_list.append({"competences":competence_list})
        else:
            return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet"
                                .format(development_plan.id)}, status=404)
        data = {"dev_plan:": development_plan_object_list}
        return JsonResponse(status=201, data=data)
    except DevelopmentPlan.DoesNotExist:
        return JsonResponse(data={"details":"Development Plan with this id doesn't exist"}, status=404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider 
in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)",
"def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)",
"def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)",
"def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan",
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def __str__(self):\n return self.plan.title",
"def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)",
"def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)",
"def __str__(self):\n return self.plan",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)",
"def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })",
"def plan(self, plan_code):\r\n return pl.Plan(self, plan_code)",
"def details(request):\n\treturn render(request, 'ExcelApp/main.html')",
"def view(self, parent, **kargs):\n design = Service('Design')\n return design.view_list(parent, self, **kargs)",
"def show():\n info(str(Project))",
"def display_accounts_details():\n return Records.display_records()",
"def show_department(id_: int):\n\n logger.debug('Routed to /departments/%i', id_)\n titles = ['Name', 'Average Salary', 'Employees', 'E-mail']\n department = None\n\n try:\n department = ds.get(id_)\n except IntegrityError:\n logger.error(\"Can't find employee with id %i\", id_)\n abort(404)\n\n logger.info('Get department %s', department.name)\n return render_template('department.html',\n title=f'Department {department.name}',\n table_title=f'Department: {department.name}',\n headers=titles,\n department=department)",
"def committee_show(request, pk):\n committee = Committee.objects.get(pk=pk)\n\n delegates = Delegate.objects.filter(committee_id=pk)\n\n context = {\"committee\": committee, \"delegates\": delegates}\n template = \"jurycore/committee_show.html\"\n return render(request, template, context)",
"def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)",
"def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)",
"def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)",
"def org_details(request, org_id):\n org = Organization.objects.get(pk=org_id)\n maps = VisitingCards.objects.filter(organization=org)\n designations = [m.designation for m in maps]\n users = [m.user for m in maps]\n # Remove None objects\n from operator import is_not\n from functools import partial\n designations = filter(partial(is_not, None),designations)\n users = filter(partial(is_not, None),users)\n\n return render_to_response('organization/details.html',\n {\"org\": org, \"maps\": maps, \"designations\": designations, \"users\":users},\n context_instance=RequestContext(request))",
"def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)",
"def show(self):\n return self._project.show()",
"def get_and_check_plan(request, company):\n model = PlanModel(request.session)\n guid = request.matchdict['plan_guid']\n plan = model.get(guid)\n if plan is None:\n raise HTTPNotFound('No such plan {}'.format(guid))\n if plan.company_guid != company.guid:\n raise HTTPForbidden('You have no permission to access plan {}'\n .format(guid))\n return plan",
"def department(department_id):\n # gather data from db about all employees\n return render_template(\"department.html\",\n department_id=department_id)",
"def show_project():\n\n title = request.args.get('title')\n\n title, description, grade = hackbright.get_project_by_title(title)\n\n grade_list = hackbright.get_grades_by_title(title)\n\n html = render_template(\"project.html\", title=title,\n description=description, grade=grade,\n grade_list=grade_list)\n\n return html",
"def report_development(request):\n q = Q(app_status__name__iequals='Current Version') # actual?\n q = q | Q(app_status__name__iequals='In Development') # projected?\n q = q | Q(app_status__name__iequals='In Suspense') # supense\n q = q | Q(app_status__name__iequals='Unassigned') # TBD?\n apps = Application.objects.filter(q).values('release_date', 'release', 'acronym', 'sr_number', 'owner_org', 'nasa_requester', 'release_change_description', 'app_status__name').order_by('release_date', 'acronym', 'release')\n return render_to_response('report/app_pipeline_abbrev.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));"
] | [
"0.6865501",
"0.6419754",
"0.64083785",
"0.6235996",
"0.6046271",
"0.60325843",
"0.60079944",
"0.5864522",
"0.5858046",
"0.58436126",
"0.5818266",
"0.57469684",
"0.5691461",
"0.5501445",
"0.54863083",
"0.5485314",
"0.5460011",
"0.54553616",
"0.54523605",
"0.5448519",
"0.54469216",
"0.54441017",
"0.5435253",
"0.54303443",
"0.5428215",
"0.541232",
"0.53913134",
"0.53536326",
"0.53465587",
"0.534052"
] | 0.68573505 | 1 |
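A minimal, self-contained sketch of how the negatives in a row like the one above can be paired with their negative_scores and ranked by hardness. The field names follow this dump; loading the row into a plain dict is assumed only for illustration and is not part of the dataset itself.

# Hypothetical helper: rank the hard negatives of one parsed row by score.
def rank_negatives(row):
    # Scores are stored as strings in this dump, so cast before sorting.
    pairs = zip(row["negatives"], (float(s) for s in row["negative_scores"]))
    # Highest-scoring negatives first, i.e. the hardest ones for a retriever.
    return sorted(pairs, key=lambda p: p[1], reverse=True)

if __name__ == "__main__":
    toy_row = {
        "negatives": ["def plan_get(request): ...", "def plans(self): ..."],
        "negative_scores": ["0.64", "0.68"],
        "document_score": "0.69",
    }
    for text, score in rank_negatives(toy_row):
        print(round(score, 2), text[:40])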
View a list of user's development plans for manager | def get_all_user_development_plans_for_manager(request, employee_id):
current_employee = Employee.objects.get(user__pk=request.user.pk)
user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()
employee = Employee.objects.filter(pk=int(employee_id)).first()
if not current_employee:
raise PermissionDenied("You don't have any employee assigned to you.", 401)
if not current_employee.isEnsoUser() and current_employee.is_manager:
raise PermissionDenied()
actions = employee.action_set.all()
if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:
raise PermissionDenied("Employee with id={} is not assigned to you.".format(employee_id), 401)
if user_development_plans:
data={}
user_development_plans_list = []
for plan in user_development_plans:
development_plan_object_list=[]
dev_plan = {}
dev_plan["id"] = plan.id
dev_plan["deleted"] = plan.deleted
if plan.type:
dev_plan["type"] = plan.type.name
dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
.get(employee=current_employee, development_plan = plan).finished_at
dev_plan["created_at"] = plan.created_at
dev_plan["created_by"] = plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
manager_data = {}
manager_data["manager_username"] = plan.manager_relation.user.username
manager_data["id"] = plan.manager_relation.user.id
development_plan_object_list.append({"manager_data":manager_data})
user_development_plans_list.append(development_plan_object_list)
else:
return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
.format(request.user.pk)}, status=404)
data = {"user_development_plans:": user_development_plans_list}
return JsonResponse(status=201, data=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider 
in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)",
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)",
"def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan",
"def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n 
comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)",
"def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)",
"def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)",
"def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)",
"def plans():",
"def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })",
"def usersview_admin():\n\n # User objects list which includes list of all users which can be broken down into editors and sponsors\n # get all users\n user_objects=db.session.query(User.id,User.email,User.user_type,User.user_status,User.name,User.organization).\\\n order_by(User.id)\n\n # get a count of the user objects\n user_count = user_objects.count()\n\n # blank list to append to\n user_list=[]\n\n # loop through user objects\n for counter in range(0,user_count):\n user_list.append(user_objects[counter])\n\n # show list of document names\n users = user_list\n\n \"\"\"Logged-in User Dashboard.\"\"\"\n return render_template(\n 'usersview_admin.jinja2',\n users=users\n )",
"def plans(self):\r\n return pl.Plans(self)",
"def list(cls):\n return cls().requests.get('plan')",
"def return_admin_list(request):\n del request\n return return_user_list(Administrador)",
"def plans(self):\r\n return Plans(self)",
"def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text",
"def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection",
"def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)",
"def admin_update_preview():\n return user_management_handler(\"show_admin\", \"\", False)",
"def show(ctx):\n skale = ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)",
"def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))",
"def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)",
"def get_goals_todo_info(self, cr, uid, context=None):\n all_goals_info = []\n plan_obj = self.pool.get('gamification.goal.plan')\n\n plan_ids = plan_obj.search(cr, uid, [('user_ids', 'in', uid), ('state', '=', 'inprogress')], context=context)\n for plan in plan_obj.browse(cr, uid, plan_ids, context=context):\n # serialize goals info to be able to use it in javascript\n serialized_goals_info = {\n 'id': plan.id,\n 'name': plan.name,\n 'visibility_mode': plan.visibility_mode,\n }\n user = self.browse(cr, uid, uid, context=context)\n serialized_goals_info['currency'] = user.company_id.currency_id.id\n\n if plan.visibility_mode == 'board':\n # board report should be grouped by planline for all users\n goals_info = plan_obj.get_board_goal_info(cr, uid, plan, subset_goal_ids=False, context=context)\n\n if len(goals_info) == 0:\n # plan with no valid planlines\n continue\n\n serialized_goals_info['planlines'] = []\n for planline_board in goals_info:\n vals = {'type_name': planline_board['goal_type'].name,\n 'type_description': planline_board['goal_type'].description,\n 'type_condition': planline_board['goal_type'].condition,\n 'type_computation_mode': planline_board['goal_type'].computation_mode,\n 'type_monetary': planline_board['goal_type'].monetary,\n 'type_suffix': planline_board['goal_type'].suffix,\n 'type_action': True if planline_board['goal_type'].action_id else False,\n 'type_display': planline_board['goal_type'].display_mode,\n 'target_goal': planline_board['target_goal'],\n 'goals': []}\n for goal in planline_board['board_goals']:\n # Keep only the Top 3 and the current user\n if goal[0] > 2 and goal[1].user_id.id != uid:\n continue\n\n vals['goals'].append({\n 'rank': goal[0] + 1,\n 'id': goal[1].id,\n 'user_id': goal[1].user_id.id,\n 'user_name': goal[1].user_id.name,\n 'state': goal[1].state,\n 'completeness': goal[1].completeness,\n 'current': goal[1].current,\n 'target_goal': goal[1].target_goal,\n })\n if uid == goal[1].user_id.id:\n vals['own_goal_id'] = goal[1].id\n serialized_goals_info['planlines'].append(vals)\n\n else:\n # individual report are simply a list of goal\n goals_info = plan_obj.get_indivual_goal_info(cr, uid, uid, plan, subset_goal_ids=False, context=context)\n\n if not goals_info:\n continue\n\n serialized_goals_info['goals'] = []\n for goal in goals_info:\n serialized_goals_info['goals'].append({\n 'id': goal.id,\n 'type_name': goal.type_id.name,\n 'type_description': goal.type_description,\n 'type_condition': goal.type_id.condition,\n 'type_monetary': goal.type_id.monetary,\n 'type_suffix': goal.type_id.suffix,\n 'type_action': True if goal.type_id.action_id else False,\n 'type_display': goal.type_id.display_mode,\n 'state': goal.state,\n 'completeness': goal.completeness,\n 'computation_mode': goal.computation_mode,\n 'current': goal.current,\n 'target_goal': goal.target_goal,\n })\n\n all_goals_info.append(serialized_goals_info)\n return all_goals_info",
"def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)",
"def user_project_view(cls, user, project):\r\n pass",
"def changelist_view(self, request, extra_context=None):\n if request.user.user_type == User.ADMIN_CEA:\n self.list_display = ('user', 'cea', 'booking', 'request_status')\n elif request.user.user_type == User.ADMIN_CRC:\n self.list_display = ('user', 'crc', 'booking', 'request_status')\n elif request.user.user_type == User.EXPRESS_USER:\n self.list_display = ('user', 'payment_type', 'request_status', 'credit_status', 'booking')\n else:\n self.list_display = ('user', 'booking','cea', 'crc', 'transit', 'payment_type', 'request_status',)\n return super(RequestAdmin, self).changelist_view(request, extra_context)",
"def KLP_Users_list(request):\n\n # get logged in user\n\n user = request.user\n if user.id:\n\n # check logged in user permissions, to get user list\n\n KLP_user_Perm(request.user, 'Users', None)\n\n # get all active(1) users list other than staff and super user order by username\n\n user_list = User.objects.filter(is_staff=0,\n is_superuser=0).order_by('username')\n\n # render show users form with users list\n\n return render_to_response('viewtemplates/show_users_form.html',\n {\n 'user_list': user_list,\n 'user': user,\n 'title': 'KLP Users',\n 'legend': 'Karnataka Learning Partnership',\n 'entry': 'Add',\n }, context_instance=RequestContext(request))\n else:\n\n # if user is not logged in redirect to login page\n\n return HttpResponseRedirect('/login/')",
"def view_budgets(self) -> None:\n Menu.prompt_view_budgets()\n for budget in self.user.budget_manager:\n print(f\"{budget}\\n\")",
"def list_plans(self, json_output: bool = False):\n plans, errors = self.rest.list_backup_plans()\n _exit_if_errors(errors)\n if json_output:\n print(json.dumps(plans, indent=2))\n else:\n self.human_print_plans(plans)"
] | [
"0.7063593",
"0.682502",
"0.6374027",
"0.6217655",
"0.61145437",
"0.610118",
"0.6058615",
"0.59781164",
"0.5932294",
"0.5915161",
"0.59006524",
"0.587544",
"0.5792681",
"0.57351124",
"0.5670701",
"0.5654031",
"0.5596579",
"0.5595037",
"0.55796534",
"0.55656326",
"0.552241",
"0.54659665",
"0.54402494",
"0.5438603",
"0.5435412",
"0.54005414",
"0.5396313",
"0.5357132",
"0.53446543",
"0.5324184"
] | 0.6848171 | 1 |
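The positive document in the row above verifies manager assignment by materialising every managed employee id into a list before testing membership. Below is a hedged sketch of the same guard written as a single exists() query; the Employee model and PermissionDenied are taken from the row, but the helper name and import path are hypothetical, and this is not presented as the dataset's own code.

from django.core.exceptions import PermissionDenied
# from myapp.models import Employee   # app-specific import, assumed

def ensure_employee_is_assigned(current_employee, employee_id):
    # One EXISTS query instead of building the full list of managed employee ids.
    is_assigned = Employee.objects.filter(
        manager=current_employee, pk=int(employee_id)
    ).exists()
    if not is_assigned:
        raise PermissionDenied(
            "Employee with id={} is not assigned to you.".format(employee_id)
        )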
View a list of development plans for active user | def get_all_development_plans_for_user(request):
current_employee = Employee.objects.get(user__pk=request.user.pk)
user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()
if not current_employee:
raise PermissionDenied("You don't have any employee assigned to you.", 401)
if user_development_plans:
data={}
user_development_plans_list = []
for plan in user_development_plans:
development_plan_object_list=[]
dev_plan = {}
dev_plan["id"] = plan.id
dev_plan["deleted"] = plan.deleted
if plan.type:
dev_plan["type"] = plan.type.name
dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
.get(employee=current_employee, development_plan = plan).finished_at
dev_plan["created_at"] = plan.created_at
dev_plan["created_by"] = plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
manager_data = {}
manager_data["manager_username"] = plan.manager_relation.user.username
manager_data["id"] = plan.manager_relation.user.id
development_plan_object_list.append({"manager_data":manager_data})
user_development_plans_list.append(development_plan_object_list)
else:
return JsonResponse(data={"details":"Employee with id={} doesn't have any Development Plan"
.format(request.user.pk)}, status=404)
data = {"user_development_plans:": user_development_plans_list}
return JsonResponse(status=201, data=data) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_active_development_plan_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n current_development_plan = DevelopmentPlan.objects.filter(\n employee_relation=current_employee,\n employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!\n\n if not current_employee:\n raise PermissionDenied()\n\n if current_development_plan:\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = current_development_plan.id\n dev_plan[\"deleted\"] = current_development_plan.deleted\n if current_development_plan.type:\n dev_plan[\"type\"] = current_development_plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = current_development_plan)\\\n .finished_at\n\n dev_plan[\"created_at\"] = current_development_plan.created_at\n dev_plan[\"created_by\"] = current_development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = current_development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = current_development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = current_development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = current_development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(current_development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = current_development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n print all_questions\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id,\n employee=current_employee).first()\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider 
in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id,\n employee=current_employee).first()\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(current_development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n else:\n return JsonResponse(data={\"details\": \"The user with id={} doesn't have an active Development Plan\"\n .format(current_employee.user.id)}, status=404)",
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)",
"def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan",
"def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)",
"def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)",
"def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def plans():",
"def list(cls):\n return cls().requests.get('plan')",
"def plans(self):\r\n return pl.Plans(self)",
"def get_plans(user, title=None, category=None, priority=None, status=None,\n id=None, orderby=None):\n user = get_user(user)\n filters = create_filters(id, title, category,\n priority, status)\n selection = user.plans.filter(**filters)\n\n if orderby:\n selection = selection.order_by(orderby)\n\n if not len(selection):\n raise ObjectDoesNotFound('There is no plans with selected filters.')\n return selection",
"def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)",
"def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n 
comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)",
"def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)",
"def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text",
"def dashboard(request):\n employee = request.user.employee_user.first()\n widgets = list()\n # development_plans = employee.getDevelopmentPlans()\n if employee.is_manager:\n widgets.append(dict(\n # template=\"mus/_widget_waiting_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Expecting preparation guides from')\n ))\n widgets.append(dict(\n # template=\"mus/_widget_todo_developmentplans.html\",\n data=employee.getMyEmployees(),\n # title=_('Preparation guides to do')\n ))\n # widgets.append(dict(\n # template = \"mus/_widget_my_developmentplans.html\",\n # data = development_plans,\n # title = _('My development plans')\n # ))\n return JsonResponse(status=200,data={\n # 'widgets': model_to_dict(widgets),\n 'employee': model_to_dict(employee),\n # 'development_plans': development_plans\n })",
"def get_plans(self):\n return stripe.Plan.all()",
"def plans(self):\r\n return Plans(self)",
"def plans():\n results = []\n if 'qry' in request.args:\n look_for = request.args['qry']\n if look_for[0] == '*':\n look_for = ''\n zipcode = request.args['zipcode']\n\n try:\n plan = request.args['plan']\n except KeyError:\n return None\n\n # If this is a medicaid or private plan\n where = tools.get_location(zipcode)\n if where:\n if plan in ('medicaid', 'private'):\n state = where.STATE\n results = PlanNames.by_state(state, look_for, plan=='medicaid')\n results = [r.plan_name for r in results]\n if state == 'OH':\n results.append('OH State Medicaid')\n elif plan == 'medicare':\n county_code = where.GEO.COUNTY_CODE\n ma_region = where.GEO.MA_REGION_CODE\n pdp_region = where.GEO.PDP_REGION_CODE\n results = Plans.find_in_county(county_code, ma_region, pdp_region, look_for)\n\n return jsonify(sorted(results))",
"def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)",
"def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())",
"def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)",
"def get(self):\n return {\n \"plans\": PLANS,\n }",
"def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})",
"def __str__(self):\n return self.plan",
"def show(ctx):\n skale = ctx.obj['skale']\n # from skale.utils.contracts_provision.main import add_test_permissions\n # add_test_permissions(skale)\n show_all_schains_names(skale)",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)",
"def user_project_view(cls, user, project):\r\n pass",
"def get(cls, plan_id):\n return cls().requests.get(f\"plan/{plan_id}\")",
"def report_development(request):\n q = Q(app_status__name__iequals='Current Version') # actual?\n q = q | Q(app_status__name__iequals='In Development') # projected?\n q = q | Q(app_status__name__iequals='In Suspense') # supense\n q = q | Q(app_status__name__iequals='Unassigned') # TBD?\n apps = Application.objects.filter(q).values('release_date', 'release', 'acronym', 'sr_number', 'owner_org', 'nasa_requester', 'release_change_description', 'app_status__name').order_by('release_date', 'acronym', 'release')\n return render_to_response('report/app_pipeline_abbrev.html',\n {'object_list': apps,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));"
] | [
"0.69843084",
"0.6867608",
"0.6754407",
"0.65818614",
"0.6510022",
"0.64818734",
"0.62669057",
"0.6181612",
"0.6179575",
"0.6045006",
"0.6030298",
"0.60151047",
"0.60096705",
"0.59797555",
"0.5899104",
"0.5885478",
"0.58685434",
"0.5831174",
"0.5770537",
"0.57329553",
"0.5725271",
"0.5691866",
"0.56607217",
"0.5653614",
"0.55828154",
"0.5575905",
"0.5567765",
"0.5548165",
"0.55380464",
"0.5536073"
] | 0.7084471 | 0 |
View active development plan for active user | def get_active_development_plan_for_user(request):
current_employee = Employee.objects.get(user__pk=request.user.pk)
current_development_plan = DevelopmentPlan.objects.filter(
employee_relation=current_employee,
employee_relation__developmentplantoemployeerelation__finished_at__isnull=True).first() # is active !!!
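    # a plan counts as active while its DevelopmentPlanToEmployeeRelation has no finished_at timestamp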
if not current_employee:
raise PermissionDenied()
if current_development_plan:
data={}
development_plan_object_list=[]
dev_plan={}
dev_plan["id"] = current_development_plan.id
dev_plan["deleted"] = current_development_plan.deleted
if current_development_plan.type:
dev_plan["type"] = current_development_plan.type.name
dev_plan["finished_at"] = DevelopmentPlanToEmployeeRelation.objects\
.get(employee=current_employee, development_plan = current_development_plan)\
.finished_at
dev_plan["created_at"] = current_development_plan.created_at
dev_plan["created_by"] = current_development_plan.created_by.username
development_plan_object_list.append({"dev_plan_details":dev_plan})
# manager_relation
manager_data={}
manager_data["manager_username"] = current_development_plan.manager_relation.user.username
manager_data["manager_first_name"] = current_development_plan.manager_relation.user.first_name
manager_data["manager_last_name"] = current_development_plan.manager_relation.user.last_name
development_plan_object_list.append({"manager_data":manager_data})
# employee_relation
employee_data={}
all_employees = current_development_plan.employee_relation.all()
if all_employees:
emp_list=[]
for emp in all_employees:
emp_data={}
emp_data["id"] = emp.user.id
emp_data["username"] = emp.user.username
emp_data["first_name"] = emp.user.first_name
emp_data["last_name"] = emp.user.last_name
emp_data["status_questions"] = emp.status_questions
employee_role = EmployeeRole.objects.filter(employee=emp).all()
name_role_list = []
for obj in employee_role:
name_role_list.append(obj.role.name)
emp_data["roles"] = name_role_list
emp_list.append(emp_data)
employee_data={"all_employees":emp_list}
else:
return JsonResponse(data={"details":"Any employee has Development Plan with id={}"
.format(current_development_plan.id)}, status=404)
development_plan_object_list.append({"employee_data":employee_data})
# competence_parts
all_competence_parts = current_development_plan.competence_parts.all()
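        # build the nested competence -> competence parts -> questions/sliders structure with this employee's answers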
competence_list = []
questions_list = []
sliders_list = []
if all_competence_parts:
for comp_part in all_competence_parts:
                comp_part_data={}
                # reset the per-part collections so each part lists only its own questions and sliders
                questions_list = []
                sliders_list = []
competence_d={"competence_parts": []}
comp_part_data["id"] = comp_part.id
comp_part_data["title"] = comp_part.title
comp_part_data["description"] = comp_part.description
comp_part_data["competence_status"] = comp_part.competence_status
all_questions = comp_part.question_set.all()
                print(all_questions)
if all_questions:
for question in all_questions:
question_data = {}
question_data["question_id"] = question.id
question_data["title"] = question.title
question_data["competence_part"] = question.competence_part.id
answer = Answer.objects.filter(question__id = question.id,
employee=current_employee).first()
if answer:
question_data["answer_id"] = answer.id
question_data["answer"] = answer.title
questions_list.append(question_data)
comp_part_data["questions"] = questions_list
all_sliders = comp_part.slider_set.all()
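                # sliders are serialized like the questions above, each paired with this employee's answer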
if all_sliders:
for slider in all_sliders:
slider_data = {}
slider_data["slider_id"] = slider.id
slider_data["scale"] = slider.scale
slider_data["competence_part"] = slider.competence_part.id
answer = Answer.objects.filter(slider__id = slider.id,
employee=current_employee).first()
                        if answer:
slider_data["answer_id"] = answer.id
slider_data["answer"] = answer.slider.scale
sliders_list.append(slider_data)
comp_part_data["sliders"] = sliders_list
comp_part_data["created_at"] = comp_part.created_at
comp_part_data["created_by"] = comp_part.created_by.username
comp_part_data["updated_at"] = comp_part.updated_at
comp_part_data["updated_by"] = comp_part.updated_by.username
competence_keys_list = ['id', 'title', 'description',
'language_code', 'status']
if not competence_list:
get_competence_data(competence_keys_list, comp_part.competence, competence_d,
comp_part_data, competence_list)
else:
competence_found = False
for competence_dict in competence_list:
if competence_dict['id'] == comp_part.competence.id:
competence_dict['competence_parts'].append(comp_part_data)
competence_found = True
break
if not competence_found:
get_competence_data(competence_keys_list, comp_part.competence, competence_d,
comp_part_data, competence_list)
development_plan_object_list.append({"competences":competence_list})
else:
return JsonResponse(data={"details":"Development Plan with id={} doesn't have any Competence Part yet"
.format(current_development_plan.id)}, status=404)
data = {"dev_plan:": development_plan_object_list}
return JsonResponse(status=201, data=data)
else:
return JsonResponse(data={"details": "The user with id={} doesn't have an active Development Plan"
.format(current_employee.user.id)}, status=404) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plan_get(request):\n company = auth_api_key(request)\n plan = get_and_check_plan(request, company)\n return plan",
"def plans(self):\n title = self.context.Title()\n return self.portal_catalog(portal_type='Plan', Subject=title)",
"def get_all_development_plans_for_user(request):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def plan_list_get(request):\n return list_by_company_guid(request, PlanModel)",
"def show(self):\n self.parser.add_argument('plan_uuid',\n help=\"Plan uuid or name\")\n args = self.parser.parse_args()\n response = self.client.plans.find(name_or_id=args.plan_uuid)\n fields = ['uuid', 'name', 'description', 'uri']\n data = dict([(f, getattr(response, f, ''))\n for f in fields])\n cliutils.print_dict(data, wrap=72)",
"def all_plans(request):\n\n plans = Plan.objects.all()\n\n context = {\n 'plans': plans,\n }\n\n return render(request, 'plans/plans.html', context)",
"def plans():",
"def plan_detail(request, plan_id):\n\n plan = get_object_or_404(Plan, pk=plan_id)\n\n context = {\n 'plan': plan,\n }\n\n return render(request, 'plans/plan_detail.html', context)",
"def show(ctx, project_id, backend):\n try:\n project = ctx.obj['projects_db'].get(project_id, backend)\n except IOError:\n raise Exception(\"Error: the projects database file doesn't exist. \"\n \"Please run `taxi update` to create it\")\n\n if project is None:\n ctx.obj['view'].err(\n \"Could not find project `%s`\" % (project_id)\n )\n else:\n ctx.obj['view'].project_with_activities(project)",
"def show():\n info(str(Project))",
"def plan(self):\n return read_small_file(self.homeDirectory + \"/.plan\")",
"def view_projects(request):\n current_user=request.user\n current_user_name=current_user.username\n projects=Project.objects.all()\n return render(request, 'view_projects.html',{'projects':projects, 'current_user_name':current_user})",
"def new_flight_plan(self):\n r = requests.post(self.base_url + f'/users/{self.username}/flight-plans')\n return r.text",
"def user_project_view(cls, user, project):\r\n pass",
"def __str__(self):\n return self.plan.title",
"def __str__(self):\n return self.plan",
"def get_plan(self):\n\t\tresponse = self.client.get(self._endpoint + \"/plan\")\n\t\tplan = response.json['plans']\n\t\tplan = list(plan.items())[0][1]\n\t\treturn Plan(plan['plan_id'],data=plan)",
"def plan(self):\n\n plan = f\"\"\"\n Input parameters: {self.params}\n Product: {self.product}\n\n Source code:\n {self.source_code}\n \"\"\"\n\n print(plan)",
"def view_project():\n\n project_title = request.args.get('title')\n\n description, max_grade = hackbright.get_project_info(project_title)\n\n student_grades = hackbright.list_students_by_completed_project(project_title)\n\n return render_template(\"project_info.html\",\n title=project_title,\n description=description,\n max_grade=max_grade,\n student_grades=student_grades)",
"def get_plan(self):\n sub = self.get_subscription()\n return sub.plan",
"def get_and_check_plan(request, company):\n model = PlanModel(request.session)\n guid = request.matchdict['plan_guid']\n plan = model.get(guid)\n if plan is None:\n raise HTTPNotFound('No such plan {}'.format(guid))\n if plan.company_guid != company.guid:\n raise HTTPForbidden('You have no permission to access plan {}'\n .format(guid))\n return plan",
"def show(self):\n return self._project.show()",
"def get_all_user_development_plans_for_manager(request, employee_id):\n current_employee = Employee.objects.get(user__pk=request.user.pk)\n user_development_plans = DevelopmentPlan.objects.filter(employee_relation=current_employee).all()\n employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n if not current_employee:\n raise PermissionDenied(\"You don't have any employee assigned to you.\", 401)\n\n if not current_employee.isEnsoUser() and current_employee.is_manager:\n raise PermissionDenied()\n actions = employee.action_set.all()\n if not int(employee_id) in [obj.id for obj in Employee.objects.filter(manager=current_employee).all()]:\n raise PermissionDenied(\"Employee with id={} is not assigned to you.\".format(employee_id), 401)\n\n if user_development_plans:\n data={}\n user_development_plans_list = []\n for plan in user_development_plans:\n\n development_plan_object_list=[]\n dev_plan = {}\n\n dev_plan[\"id\"] = plan.id\n dev_plan[\"deleted\"] = plan.deleted\n if plan.type:\n dev_plan[\"type\"] = plan.type.name\n dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects\\\n .get(employee=current_employee, development_plan = plan).finished_at\n dev_plan[\"created_at\"] = plan.created_at\n dev_plan[\"created_by\"] = plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n manager_data = {}\n manager_data[\"manager_username\"] = plan.manager_relation.user.username\n manager_data[\"id\"] = plan.manager_relation.user.id\n\n development_plan_object_list.append({\"manager_data\":manager_data})\n user_development_plans_list.append(development_plan_object_list)\n\n else:\n return JsonResponse(data={\"details\":\"Employee with id={} doesn't have any Development Plan\"\n .format(request.user.pk)}, status=404)\n\n data = {\"user_development_plans:\": user_development_plans_list}\n return JsonResponse(status=201, data=data)",
"def user_project_view(cls, user, project):\n pass",
"def view():\n login_dict = _open_cnfg()\n login_name, login_url, login_api, login_hid = ['Login name'], ['URL'], ['API key'], ['History ID']\n for lgn in login_dict['logins']:\n login_name.append(lgn)\n login_url.append(login_dict['logins'][lgn]['url'])\n login_api.append(login_dict['logins'][lgn]['api_key'])\n login_hid.append(login_dict['logins'][lgn]['hid'])\n click.echo(\"You are currently using active login: \" + click.style(login_dict['active_login'], bold=True))\n utils._tabulate([login_name, login_url, login_api, login_hid])",
"def my_dashboard_print(request):\n #Get the associated contact for our user\n user_con = request.user.contact\n qs_proj_assoc, qs_task_assoc = get_tiered_upcoming(user_con)\n\n #Get the projects associated with the user\n user_proj_table = table_assoc.ProjectAssocTable_Printable(qs_proj_assoc)\n #Get the tasks associated with the user\n user_task_table = table_assoc.TaskAssocTable_Printable(qs_task_assoc)\n\n # Render the HTML template index.html with the data in the context variable\n return render(\n request,\n 'my_dashboard_printable.html',\n context={\n 'user_con':user_con,\n 'user_proj_table':user_proj_table,\n 'user_task_table':user_task_table,\n },\n )",
"def currently_in_development_page(request, model='', header='', form_data=None):\n html = render_to_string(\"currently_in_development.html\")\n return html",
"def list(cls):\n return cls().requests.get('plan')",
"def get_and_display_project():\n\n project = request.args.get('project')\n\n title, description, max_grade = hackbright.get_project_by_title(project)\n\n\n github_grade_list = hackbright.get_grades_by_title(project)\n\n return render_template(\"project_info.html\",\n title=title,\n description=description,\n max_grade=max_grade,\n github_grade_list=github_grade_list)",
"def development_plan_details(request, development_plan_id): #, employee_id ):\n # employee = Employee.objects.get(user__pk=request.user.pk)\n # employee = Employee.objects.filter(pk=int(employee_id)).first()\n\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n current_employee = Employee.objects.filter(user__pk=request.user.pk).first()\n all_employees = development_plan.employee_relation.all()\n\n try:\n development_plan = DevelopmentPlan.objects.get(pk=int(development_plan_id))\n data={}\n development_plan_object_list=[]\n dev_plan={}\n dev_plan[\"id\"] = development_plan.id\n dev_plan[\"deleted\"] = development_plan.deleted\n if development_plan.type:\n dev_plan[\"type\"] = development_plan.type.name\n # dev_plan[\"finished_at\"] = DevelopmentPlanToEmployeeRelation.objects.get(development_plan = development_plan)\\\n # .finished_at\n\n dev_plan[\"created_at\"] = development_plan.created_at\n dev_plan[\"created_by\"] = development_plan.created_by.username\n\n development_plan_object_list.append({\"dev_plan_details\":dev_plan})\n\n# manager_relation\n manager_data={}\n manager_data[\"manager_username\"] = development_plan.manager_relation.user.username\n manager_data[\"manager_first_name\"] = development_plan.manager_relation.user.first_name\n manager_data[\"manager_last_name\"] = development_plan.manager_relation.user.last_name\n development_plan_object_list.append({\"manager_data\":manager_data})\n\n# employee_relation\n employee_data={}\n all_employees = development_plan.employee_relation.all()\n if all_employees:\n emp_list=[]\n for emp in all_employees:\n emp_data={}\n emp_data[\"id\"] = emp.user.id\n emp_data[\"username\"] = emp.user.username\n emp_data[\"first_name\"] = emp.user.first_name\n emp_data[\"last_name\"] = emp.user.last_name\n emp_data[\"status_questions\"] = emp.status_questions\n\n emp_data[\"dev_plan_finished_at\"] = DevelopmentPlanToEmployeeRelation\\\n .objects.get(employee=emp,\n development_plan = development_plan)\\\n .finished_at\n\n employee_role = EmployeeRole.objects.filter(employee=emp).all()\n name_role_list = []\n for obj in employee_role:\n name_role_list.append(obj.role.name)\n emp_data[\"roles\"] = name_role_list\n emp_list.append(emp_data)\n employee_data={\"all_employees\":emp_list}\n else:\n return JsonResponse(data={\"details\":\"Any employee has Development Plan with id={}\"\n .format(development_plan.id)}, status=404)\n\n development_plan_object_list.append({\"employee_data\":employee_data})\n\n\n# competence_parts\n all_competence_parts = development_plan.competence_parts.all()\n\n competence_list = []\n questions_list = []\n sliders_list = []\n\n if all_competence_parts:\n for comp_part in all_competence_parts:\n\n comp_part_data={}\n competence_d={\"competence_parts\": []}\n\n comp_part_data[\"id\"] = comp_part.id\n comp_part_data[\"title\"] = comp_part.title\n comp_part_data[\"description\"] = comp_part.description\n comp_part_data[\"competence_status\"] = comp_part.competence_status\n\n all_questions = comp_part.question_set.all()\n if all_questions:\n for question in all_questions:\n question_data = {}\n question_data[\"question_id\"] = question.id\n question_data[\"title\"] = question.title\n question_data[\"competence_part\"] = question.competence_part.id\n\n answer = Answer.objects.filter(question__id = question.id).first() #employee=current_employee\n\n if answer:\n question_data[\"answer_id\"] = answer.id\n question_data[\"answer\"] = answer.title\n\n questions_list.append(question_data)\n\n 
comp_part_data[\"questions\"] = questions_list\n\n all_sliders = comp_part.slider_set.all()\n if all_sliders:\n for slider in all_sliders:\n slider_data = {}\n slider_data[\"slider_id\"] = slider.id\n slider_data[\"scale\"] = slider.scale\n slider_data[\"competence_part\"] = slider.competence_part.id\n\n answer = Answer.objects.filter(slider__id = slider.id).first() #employee=current_employee\n\n if slider:\n slider_data[\"answer_id\"] = answer.id\n slider_data[\"answer\"] = answer.slider.scale\n\n sliders_list.append(slider_data)\n\n comp_part_data[\"sliders\"] = sliders_list\n\n comp_part_data[\"created_at\"] = comp_part.created_at\n comp_part_data[\"created_by\"] = comp_part.created_by.username\n comp_part_data[\"updated_at\"] = comp_part.updated_at\n comp_part_data[\"updated_by\"] = comp_part.updated_by.username\n\n competence_keys_list = ['id', 'title', 'description',\n 'language_code', 'status']\n\n if not competence_list:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n else:\n competence_found = False\n for competence_dict in competence_list:\n if competence_dict['id'] == comp_part.competence.id:\n competence_dict['competence_parts'].append(comp_part_data)\n competence_found = True\n break\n\n if not competence_found:\n get_competence_data(competence_keys_list, comp_part.competence, competence_d,\n comp_part_data, competence_list)\n\n development_plan_object_list.append({\"competences\":competence_list})\n\n else:\n return JsonResponse(data={\"details\":\"Development Plan with id={} doesn't have any Competence Part yet\"\n .format(development_plan.id)}, status=404)\n\n data = {\"dev_plan:\": development_plan_object_list}\n return JsonResponse(status=201, data=data)\n\n except DevelopmentPlan.DoesNotExist:\n return JsonResponse(data={\"details\":\"Development Plan with this id doesn't exist\"}, status=404)"
] | [
"0.6693725",
"0.66212505",
"0.6312085",
"0.61751384",
"0.61356807",
"0.60612977",
"0.60564137",
"0.59662765",
"0.5901057",
"0.58823967",
"0.5879139",
"0.585081",
"0.58050555",
"0.57663673",
"0.5763864",
"0.57417154",
"0.5737406",
"0.57106596",
"0.57047117",
"0.56790864",
"0.5674888",
"0.5672883",
"0.56625134",
"0.5657511",
"0.56480134",
"0.5645293",
"0.5624678",
"0.5597521",
"0.5568976",
"0.5560515"
] | 0.6910045 | 0 |
Get or Update goal by id | def self_goal_by_id(request, goal_id):
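    # non-POST requests return the goal's serialized fields; POST (owner only) validates the form and saves changes first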
current_user = request.user
fields_map = {
'goal_answers': lambda g: [
{
'id': answ.id,
'title': answ.title,
"created_by": answ.created_by.username,
"created_at": answ.created_at,
"file": answ.file.url
} for answ in g.goal_answers.all()
]
}
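    # fields without an entry in fields_map are read directly from the Goal instance via getattr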
fields = ['title', 'goal_answers', 'id', 'is_achieved']
goal = Goal.objects.get(pk=goal_id)
if request.method == 'POST':
if goal.created_by != current_user:
raise PermissionDenied("You can edit only your own goals")
f = GoalForm(data=request.json_body)
if not f.is_valid():
return JsonResponse(data={"detail": json.loads(f.errors.as_json())}, status=400)
goal = f.save(current_user, goal)
return JsonResponse(
data={f: fields_map[f](goal) if f in fields_map else getattr(goal, f) for f in fields}, status=200
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def goal(self, goal_id):\r\n return goals.Goal(self, goal_id)",
"def goal(self, goal_id):\r\n return Goal(self, goal_id)",
"def getById(self, id_goals):\n lparam = [id_goals]\n rep = AbstractDAO._read(self, R_READBYID, lparam)\n return self.__fetch_to_object(rep, True)",
"def updateOne(id):\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to Find the given client'}\n\n # Update the URL\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n update(Followup).\n where(Followup.columns.id == id).\n values(json_data)\n )\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Update the given client'}\n return {'status': \"Update Succesful\"}",
"def put(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n\n return activity._update(request.json)",
"def find_goal(self, concl, goal_id):\n prf = self.prf\n try:\n for n in goal_id:\n for item in prf.items[:n]:\n if item.th is not None and item.th.can_prove(concl):\n return item.id\n prf = prf.items[n].subproof\n except (AttributeError, IndexError):\n raise TacticException()",
"def update_goal(self):\n pass",
"def put(self, problem_id):\n args = self.request.arguments\n x = args.pop('latitude')\n y = args.pop('longitude')\n args['location'] = create_location(x, y)\n self.sess.query(Problem).filter_by(id=int(problem_id)). \\\n update(args)\n\n self.sess.commit()\n\n activity = ProblemsActivity(\n problem_id=int(problem_id),\n user_id=self.get_current_user(),\n datetime=get_datetime(),\n activity_type=\"UPDATED\"\n )\n self.sess.add(activity)\n self.sess.commit()",
"def put(self,id):\r\n data = request.json\r\n return update(id=id,data=data)",
"def put(self, id):\n adm = Administration()\n print(api.payload)\n p = Person.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_person(p)\n return p, 200\n\n else:\n return '', 500",
"def put(self, id):\n adm = Administration()\n print(api.payload)\n lp = LearnProfile.from_dict(api.payload)\n if lp is not None:\n lp.set_id(id)\n adm.save_learnprofile(lp)\n return lp, 200\n\n else:\n return '', 500",
"def put(self, id):\r\n try:\r\n self.valid_args()\r\n existing = db.session.query(self.__class__).get(id)\r\n if existing is None:\r\n raise NotFound\r\n getattr(require, self.__class__.__name__.lower()).update(existing)\r\n data = json.loads(request.data)\r\n # may be missing the id as we allow partial updates\r\n data['id'] = id\r\n # Clean HATEOAS args\r\n data = self.hateoas.remove_links(data)\r\n inst = self.__class__(**data)\r\n db.session.merge(inst)\r\n db.session.commit()\r\n self._refresh_cache(inst)\r\n return Response(json.dumps(inst.dictize()), 200,\r\n mimetype='application/json')\r\n except IntegrityError:\r\n db.session.rollback()\r\n raise\r\n except Exception as e:\r\n return error.format_exception(\r\n e,\r\n target=self.__class__.__name__.lower(),\r\n action='PUT')",
"def update_ship(id):\n data = request.get_json()\n print(data)\n for ship in db['ships']:\n if ship['id'] == id:\n if data['name']:\n ship['name'] == data['name']\n if data['age']:\n ship['age'] == data['age']\n return ship, status.HTTP_202_ACCEPTED\n return {}, status.HTTP_404_NOT_FOUND",
"def put(self, id ):\n adm = Administration()\n print(api.payload)\n p = Profile.from_dict(api.payload)\n if p is not None:\n p.set_id(id)\n adm.save_profile(p)\n return p, 200\n else:\n return '', 500",
"def update_item(id: str, obj: endpoint_model):\n # should this error if exists?\n if obj.id:\n if obj.id != id:\n raise HTTPException(status_code=400, detail=\"id in body does not match id in path\")\n else:\n obj.id = id\n new_obj = db.save(obj)\n return new_obj",
"def put(self, id):\n data = request.json\n update_scenario(id, data)\n return None, 204",
"def put(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n if not all(\n [request.form.get('roll_no'),\n request.form.get('name'),\n request.form.get('batch'),\n request.form.get('programme'),\n request.form.get('category'),]):\n \n return {'msg':'Field(s) missing.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.roll_no = request.form.get('roll_no'),\n ach.name = request.form.get('name'),\n ach.batch = checkBatch(request.form.get('batch')),\n ach.programme = request.form.get('programme'),\n ach.category = request.form.get('category'),\n\n ach.save()\n data = ach.toDict()\n\n return {'data' : data}, 200\n\n except (ValueError, mongoalchemy.exceptions.BadValueException) as e:\n print(e)\n return {'msg':'Invalid form data.'}, 400\n\n except Exception as e:\n print(e)\n return {'msg':'Could not modify academic achievement.'}, 500",
"def put(self, _id):\n payload = self.request.json\n # TODO: validate the json before updating the db\n self.app.db.jobs.update({'_id': int(_id)}, {'$set': {'status': payload.get('status'), 'activity': payload.get('activity')}})",
"def put(self,id):\n adm = Administration()\n s = Suggestion.from_dict(api.payload)\n if s is not None:\n s.set_id(id)\n adm.save_suggestion(s)\n return s, 200\n\n else:\n return '', 500",
"def put(self, id):\n adm = Administration()\n lg = LearnGroup.from_dict(api.payload)\n if lg is not None:\n\n lg.set_id(id)\n adm.save_learngroup(lg)\n return lg, 200\n\n else:\n return '', 500",
"def put(self, id):\n return userDao.update(id, api.payload)",
"def test_update_goal(self):\n pass",
"def put(self, id):\n return None, 204",
"def edit_a_parcel(destination, id):\n query = \"\"\"UPDATE parcels SET destination = %s WHERE id = %s\"\"\"\n tuple =(destination , id)\n db.insert(query, tuple)",
"def put(self, id):\n return update_msg(request.json, id)",
"def set_goal(self, new_goal, updating=False):\n GOAL_QUERY = \"\"\"UPDATE Goal SET description = %s WHERE id = %s AND curriculum_name = %s\"\"\" if updating \\\n else \"\"\"INSERT INTO Goal (id, curriculum_name, description) VALUES (%s, %s, %s)\"\"\"\n\n if not updating:\n self.db_cursor.execute(\n GOAL_QUERY,\n (new_goal.id, new_goal.curriculum_name, new_goal.description))\n else:\n self.db_cursor.execute(\n GOAL_QUERY,\n (new_goal.description, new_goal.id, new_goal.curriculum_name))\n self.db_connection.commit()",
"def patch(self, id=None):\n if id:\n boat2Depart = test4ValidEntity(id)\n if boat2Depart == None:\n self.response.set_status(404)\n else:\n requestBody = json.loads(self.request.body)\n query = Slip.query(Slip.number == requestBody['number'])\n result = query.fetch(limit = 1)\n for match in result:\n if match.current_boat == boat2Depart.id and match.number == requestBody['number']:\n boat2Depart.at_sea = True\n boat2Depart.put()\n match.current_boat = None\n match.arrival_date = None\n match.departure_date = requestBody['departure_date']\n match.departed_boat = boat2Depart.id\n match.put()\n slip_dict = match.to_dict()\n del slip_dict['departure_history']\n self.response.headers['Content-Type'] = 'application/json'\n self.response.write(json.dumps(slip_dict))\n else:\n self.response.set_status(400)",
"def put(self, id):\n data = request.json\n update_entry(id, data)\n return None, 204",
"def get(self, _id):",
"def put(self, id):\n req = api.payload\n try:\n result = update_task(\n get_db(),\n id,\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid Status\")"
] | [
"0.68321764",
"0.6705152",
"0.6541664",
"0.6067706",
"0.6032877",
"0.6005164",
"0.59921056",
"0.5976328",
"0.59698373",
"0.5885591",
"0.58422995",
"0.58416253",
"0.5819017",
"0.5806884",
"0.58029574",
"0.5794082",
"0.57710695",
"0.57252264",
"0.57099134",
"0.56292725",
"0.56237155",
"0.56168914",
"0.55942357",
"0.55751616",
"0.5572422",
"0.55689883",
"0.5552304",
"0.55462456",
"0.54960907",
"0.5491878"
] | 0.67704624 | 1 |
This function takes a csv file as an argument, deduplicates it, and writes the deduplicated dataset to a csv file if a path for the output file is provided as the second argument. It returns the deduplicated dataframe. | def dataDedup_csv(infile, outfile=None):
if fpath.isfile(infile):
dataset = pd.read_csv(infile, sep=',', dtype='unicode')
dedup_dataset = dataset.drop_duplicates()
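        # drop_duplicates() keeps the first occurrence of each fully identical row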
if outfile!=None:
dedup_dataset.to_csv(outfile,
encoding='utf-8', index=False,
header=False)
return dedup_dataset
else:
print("file \"%s\" does not exist... or is not a file..." %(infile)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def strip_duplicates(in_file, out_file, sep_type=\"\", header_rows=0):\n\n util.check_output_dir(out_file)\n\n if header_rows !=0: header=read_header(in_file, num_header_rows=header_rows, sep_type =\"\")\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_False=np.where(dup==False)\n\t\n no_dup=data.loc[dup_False]\n\n len_no_dup=no_dup.shape[0]\n len_dup_False_indx=len(dup_False[0])\n\n try:\n assert len_no_dup == len_dup_False_indx\n except AssertionError:\n print(\"Removal of duplicates and creation of new output failed.\")\n print(\"Length of no duplicated indices does not match the subsampled main dataframe... function failiure :(\")\n\n\t\n if header_rows !=0: \n frames = [header, no_dup]\n no_dup = pd.concat(frames)\n\n if sep_type==\"\":\n no_dup.to_csv(out_file, sep=\"\\t\", header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))\n else:\n no_dup.to_csv(out_file, sep=sep_type, header=False, index=False)\n print(\"Duplicates removed - output file: %s\" %(out_file))",
"def save_csv(csv_path: str, duplicates: pd.DataFrame) -> None:\n csv_file = os.path.join(csv_path, 'duplicates.csv')\n duplicates.to_csv(csv_file, index=False)",
"def remove_duplicates_phase_data():\n print(\"Removing any duplicates...\")\n merged_phases_data = pd.read_csv(results_folder + 'phases/raw/merged_phases.csv', header=0,\n skipinitialspace=True, usecols=output_fields)\n df = pd.DataFrame(merged_phases_data)\n clean_df = df.drop_duplicates()\n clean_df.to_csv(results_folder + 'phases/processed/clean_merged_phases.csv', sep=',', index=False)\n print(\"Duplicates removed!\")",
"def main(argv=None):\n args, ret = parse_cmdline(argv)\n if ret != GOOD_RET:\n return ret\n\n deduped = compress_dups(read_csv(args.file, all_conv=float), args.column)\n write_csv(deduped, create_out_fname(args.file, prefix=PREFIX),\n read_csv_header(args.file))\n\n return GOOD_RET # success",
"def check_duplicates(in_file, sep_type=\"\", header_rows=0):\n\n if sep_type==\"\":\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, delim_whitespace=True) \n else:\n data=pd.read_csv(in_file, skiprows=header_rows, header=None, sep=sep_type)\n\n dup=data.duplicated(keep='first')\n dup_True=np.where(dup==True)\n len_dup_True_indx=len(dup_True[0])\n\n if len_dup_True_indx == 0:\n print(\"No duplicated rows in %s\" %(in_file))\n else:\n print(\"%i duplicated rows found in %s\" %(len_dup_True_indx, in_file))",
"def remove_dupes(infile):\n filename = infile.replace('.csv', '-unique.csv')\n s = set()\n with open(filename, 'w') as outfile:\n for line in open(infile):\n if line not in s:\n outfile.write(line)\n s.add(line)",
"def history_clones(file, ht_df):\n if os.path.isfile(file):\n # if the file exists, we merge\n print(file + ' found, merging')\n df_file = pd.read_csv(file)\n\n ht_df['timestamp'] = pd.to_datetime(ht_df['timestamp']).dt.date\n\n df_file = pd.concat([df_file, ht_df])\n df_file['timestamp'] = df_file['timestamp'].astype(str)\n\n df_file.sort_values('timestamp', inplace=True)\n print(df_file.to_string())\n # we can't just drop the first instance: for the first day, we'll loose data.\n # so keep max value per date\n\n #df_file.drop_duplicates(subset=['timestamp'], keep='last', inplace=True)\n df_file = df_file.groupby('timestamp')[['uniques', 'count']].agg(['max']).reset_index()\n\n df_file.columns = df_file.columns.droplevel(level=1)\n #print(df_file.to_string())\n #print(df_file.columns)\n df_file.to_csv(file, index=False)\n\n else:\n # otherwise, just dump the df\n print('There is no file to merge, dumping df to ' + file)\n ht_df.to_csv(file, index=False)",
"def merge_duplicate_psm_rows(\n csv_file_path=None,\n psm_counter=None,\n psm_defining_colnames=None,\n psm_colnames_to_merge_multiple_values={},\n joinchar=\"<|>\",\n overwrite_file=True,\n):\n rows_to_merge_dict = defaultdict(list)\n\n if overwrite_file:\n tmp_file = csv_file_path + \".tmp\"\n os.rename(csv_file_path, tmp_file)\n out_file = csv_file_path\n else:\n tmp_file = csv_file_path\n out_file = csv_file_path.strip(\".csv\") + \"_merged_duplicates.csv\"\n UNode.print_info(\"Merging rows of the same PSM...\", caller=\"postflight\")\n # print('Merging rows of the same PSM...')\n csv_kwargs = {}\n if sys.platform == \"win32\":\n csv_kwargs[\"lineterminator\"] = \"\\n\"\n else:\n csv_kwargs[\"lineterminator\"] = \"\\r\\n\"\n with open(tmp_file, \"r\") as tmp, open(out_file, \"w\", newline=\"\") as out:\n tmp_reader = csv.DictReader(tmp)\n writer = csv.DictWriter(out, fieldnames=tmp_reader.fieldnames, **csv_kwargs)\n writer.writeheader()\n for row in tmp_reader:\n psm = tuple([row[x] for x in psm_defining_colnames if x in row.keys()])\n # each unique combination of these should only have ONE row!\n # i.e. combination of seq+spec+score\n if psm_counter[psm] == 1:\n # no duplicate = no problem, we can just write the row again\n writer.writerow(row)\n elif psm_counter[psm] > 1:\n # we have to collect all rows of this psm, and merge + write\n # them later!\n rows_to_merge_dict[psm].append(row)\n else:\n raise Exception(\"This should never happen.\")\n # finished parsing the old unmerged unified csv\n for rows_to_merge in rows_to_merge_dict.values():\n writer.writerow(\n merge_rowdicts(\n rows_to_merge,\n psm_colnames_to_merge_multiple_values,\n joinchar=joinchar,\n )\n )\n # remove the old unified csv that contains duplicate rows\n if overwrite_file:\n os.remove(tmp_file)\n UNode.print_info(\"Done.\", caller=\"postflight\")\n return out_file",
"def remove_duplicated_lines():\n\n work_folder = os.path.join(CURRENT_FOLDER, \"..\\\\Data\\\\weather_data\")\n unique_lines = []\n # compare line be line\n with open(os.path.join(work_folder, \"tempfile.csv\"), \"w\") as outfile:\n with open(os.path.join(work_folder, \"filtered_merged_history_KMDW.csv\")) as infile:\n for line in infile:\n if line not in unique_lines:\n outfile.write(line)\n unique_lines.append(line)\n # replace files\n shutil.copyfile(os.path.join(work_folder, 'tempfile.csv'), os.path.join(\n work_folder, \"filtered_merged_history_KMDW.csv\"))\n # remove temp file\n os.remove(os.path.join(work_folder, \"tempfile.csv\"))",
"def prep_file(filename: str,\n dialect: csvhelper.Dialect,\n key_cols: List[int],\n temp_dir: str,\n out_dir: str,\n already_sorted: bool,\n already_uniq: bool) -> Tuple[str, int]:\n dups_removed = 0\n\n # Sort the file if necessary\n if already_sorted:\n if dialect.has_header:\n abort('Invalid config: already_sorted and has-header')\n sorted_fn = filename\n elif (dialect.quoting == csv.QUOTE_NONE\n and dialect.escapechar is None\n and dialect.doublequote is None\n and dialect.has_header is False):\n sorter = gsorter.CSVSorter(dialect, key_cols, temp_dir, out_dir) # type: ignore\n sorted_fn = sorter.sort_file(filename)\n else:\n sorted_fn = filename + '.sorted'\n sort_key_config = convert_key_offsets_to_sort_key_config(key_cols)\n sorter = gsorter.CSVPythonSorter(in_fqfn=filename, # type: ignore\n out_fqfn=sorted_fn,\n sort_keys_config=sort_key_config,\n dialect=dialect,\n dedupe=(not already_uniq),\n keep_header=False)\n sorter.sort_file() # type: ignore\n sorter.close() # type: ignore\n dups_removed = sorter.stats['recs_deduped'] # type: ignore\n already_uniq = True\n\n # Dedupe the file if necessary - only for the CSVSorter:\n if already_uniq:\n final_name = sorted_fn\n else:\n deduper = gdeduper.CSVDeDuper(dialect, key_cols, out_dir)\n final_name, read_cnt, write_cnt = deduper.dedup_file(sorted_fn)\n dups_removed = read_cnt - write_cnt\n if sorted_fn != filename:\n os.remove(sorted_fn)\n\n return final_name, dups_removed",
"def create_unique_file(files_to_concat: list) -> pd.DataFrame:\n dfs_to_concat = []\n\n print(f'Number of files: {len(files_to_concat)}')\n\n for file in files_to_concat:\n\n year = int(file[0])\n month = file[1]\n filepath = file[2]\n\n # Use pd.read_csv to solve some problems with files\n # engine: python - This parameter is slower compared to c-engine but handle but handle\n # some problematic characters better\n # sep=\"[\\t;]\" - using python-engine it's possible to use regular expressions to define the sep char, where\n # python identify the char to use with each file.\n # skiprows = 1 - As the columns have different names in many files, I just combine header=None with skiprows=1\n # with this, just data is read.\n actual_df = pd.read_csv(filepath, engine='python', sep=\"[\\t;]\", skiprows=1, header=None, dtype='category')\n\n # File 2017-Dezembro.csv has duplicate columns so an if is necessary here just to solve this problem.\n if month == 'Dezembro' and year == 2017:\n\n del(actual_df[7])\n actual_df.columns = [n for n in range(12)]\n\n # Creating two new columns with month and year for each file.\n actual_df['month'], actual_df['year'] = zip(*[(month, year) for n in range(len(actual_df))])\n\n print(f'Processing file: {filepath}')\n\n dfs_to_concat.append(actual_df)\n\n # Concat all files into unique_df\n unique_df = pd.concat(dfs_to_concat, axis=0, ignore_index=True)\n\n return unique_df",
"async def collate_similar_data(input_csv_file_path, output_csv_file_path):\n if not input_csv_file_path or not output_csv_file_path:\n return\n with open(output_csv_file_path, 'w') as file_object:\n csv_writer = csv.writer(file_object, delimiter=',')\n csv_writer.writerow(\n ('Account ID', 'First Name', 'Created On', 'Status',\n 'Status Set On'))\n for csv_row in read_csv_file(input_csv_file_path):\n account_status = (await fetch_account_status(csv_row[0]))\n csv_writer.writerow(csv_row + (\n account_status.get('status', ''),\n datetime.datetime.strftime(\n datetime.datetime.strptime(\n account_status.get('created_on'), '%Y-%m-%d'),\n '%Y-%m-%d') if account_status.get('created_on') else ''))",
"def list_all_duplicates(folder: str,\n to_csv: bool = False,\n csv_path: str = './',\n ext: str = None,\n fastscan: bool = False) -> pd.DataFrame:\n duplicate_files = create_table(folder, ext, pre=fastscan)\n duplicate_files = duplicate_files[duplicate_files['hash'].duplicated(keep=False)]\n duplicate_files.sort_values(by='hash', inplace=True)\n\n if to_csv is True:\n save_csv(csv_path, duplicate_files)\n\n return duplicate_files",
"def check_errors(csv_file):\n\n logger.info(\"Checking %s.\", csv_file)\n\n errors_found = False\n errors_file = f\"{os.path.splitext(csv_file)[0]}_errors.csv\"\n deduplicated_file = f\"{os.path.splitext(csv_file)[0]}_deduplicated.csv\"\n\n with open(csv_file, 'r', encoding=\"UTF-8\") as input_file,\\\n open(deduplicated_file, 'w', encoding=\"UTF-8\") as dedup,\\\n open(errors_file, 'w', encoding=\"UTF-8\") as errors:\n\n reader = csv.reader(input_file, delimiter=',')\n dedup_writer = csv.writer(dedup)\n error_writer = csv.writer(errors)\n line = 1\n entries = set()\n for row in reader:\n\n # Skip empty lines.\n if not ''.join(row).strip():\n continue\n\n # Record any incorrect classifications.\n if not row[1].lower() == \"normal\" and not row[1].lower() == \"anomaly\":\n error_writer.writerow(\n [line, row[0], row[1], \"INVALID_CLASSIFICATION\"])\n errors_found = True\n\n # Write first image entry to dedup file and record duplicates.\n key = row[0]\n if key not in entries:\n dedup_writer.writerow(row)\n entries.add(key)\n else:\n error_writer.writerow([line, row[0], row[1], \"DUPLICATE\"])\n errors_found = True\n line += 1\n\n if errors_found:\n logger.info(\"Errors found check %s.\", errors_file)\n else:\n os.remove(errors_file)\n os.remove(deduplicated_file)\n\n return errors_found",
"def filter_unique_ticker(state: State):\n if state.events.extract_company_list + state.events.load_company_list == 200:\n try:\n state.files.combined_exchanges.columns = map(str.lower, state.files.combined_exchanges.columns)\n\n # Following line is dropping duplicates but there's not?\n state.output = state.files.combined_exchanges[[\"symbol\", 'name', 'lastsale', 'marketcap', 'ipoyear', 'sector', 'industry']].drop_duplicates()\n state.output.to_csv(f\"{PATH}/data/combined_exchanges.csv\")\n state.events.transform_company_list = 100\n except Exception as e:\n state.output = None\n LOGGER.warning(f\"Could not transform company data , error: {e}\")\n\n else:\n state.output = pd.read_csv(f\"{PATH}/data/combined_exchanges_sample.csv\")\n LOGGER.warning(f\"Using old company ticker file\")",
"def make_clean_csv(panda_df, dest_path_name):\n panda_df.to_csv(dest_path_name)\n return True",
"def check_duplicated_data(self, path, target):\n files_in_path = [file for file in self.get_csv_in_path(path)]\n print(\"check duplicated for file {} in path {} , files\".format(target, path))\n if target in files_in_path:\n print('The {} is already exist'.format(target))\n return True\n return False",
"def dedup_file(in_fname, out_fname):\n with open(in_fname, 'r') as in_file, open(out_fname, 'w') as out_file:\n lines, n_lines, n_duplicates = get_lines(in_file)\n lines = list(lines)\n random.shuffle(lines)\n out_file.write('\\n'.join(lines))\n logging.info(f'deduplicated {in_fname}, removed {n_duplicates} duplicates out of {n_lines} lines')\n return n_lines, n_duplicates",
"def remove_duplicates(file):\n file_tmp = 'tmp'\n with open(file) as f, open(file_tmp, 'w') as o:\n for line in unique_everseen(f):\n o.write(line)\n # rename file_tmp to file\n os.remove(file)\n os.rename(file_tmp, file)",
"def _check_duplicate_id_csv(self):\n all_csv_ids = []\n self.msg_args = []\n for csv_file_rel in self.filter_files_ext('csv', relpath=True):\n csv_file = os.path.join(self.module_path, csv_file_rel)\n if os.path.basename(csv_file) == 'ir.model.access.csv':\n all_csv_ids.extend(self.get_field_csv(csv_file))\n duplicated_ids_csv = self.get_duplicated_items(all_csv_ids)\n for duplicated_id_csv in duplicated_ids_csv:\n self.msg_args.append((csv_file_rel, duplicated_id_csv))\n if duplicated_ids_csv:\n return False\n return True",
"def rm_dup_individuals(input_prefix, output_dir, base_prefix, prefix='temp_dedups_fids'):\n full_path, pprefix = os.path.split(input_prefix)\n\n # ============= OUTPUT FILES =============\n duplicated_samples_file = os.path.join(output_dir, '{}_samples_to_rm{}.csv'.format(prefix,base_prefix))\n no_dups_plink_prefix = os.path.join(output_dir, \"{}_{}\".format(prefix, base_prefix))\n\n # ============= REMOVE DUPLICATE SAMPLES =============\n # read fam file\n fam_df = pd.read_csv(input_prefix+\".fam\", sep=\"\\s+\", names=['FID', 'IID', 'c3', 'c4', 'c5', 'c6'])\n\n assert fam_df[~(fam_df.FID == fam_df.IID)].shape[0] == 0,\\\n \"FID and IID are *not* the same in this file:\\n{}\".format(input_prefix+\".fam\")\n\n\n # identify duplicated FID&IID\n dup_index = fam_df[fam_df.duplicated(subset=['FID', 'IID'], keep='first')].index\n dup_fids = fam_df.iloc[dup_index, :].FID.unique()\n\n # each duplicate FID & IID, except for the first instance, will be have \"_[counter]\" appened\n for this_fid in dup_fids:\n for counter, index_row in enumerate(fam_df.loc[fam_df['FID'] == this_fid].iterrows()):\n index, this_row = index_row\n if counter == 0:\n continue\n else:\n fam_df.loc[index, ['FID', 'IID']] = fam_df.loc[index, [\n 'FID', 'IID']].apply(lambda x: x+\"_{}\".format(counter))\n\n # write duplicated FID and IID to file\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n fam_df.loc[dup_index, ['FID', 'IID']].to_csv(duplicated_samples_file, sep=\" \", header=None, index=None)\n\n # OVERWRITE existing .fam to tagging duplicates\n fam_df.to_csv(input_prefix+\".fam\", sep=\" \", header=None, index=None)\n\n\n # plink to rm duplicates\n if (fam_df.loc[dup_index, ['FID', 'IID']].shape[0] > 0):\n rm_dups_cmd = \"plink --bfile {} --remove {} --make-bed --out {}\".format(\n input_prefix, duplicated_samples_file, no_dups_plink_prefix)\n else:\n rm_dups_cmd = \"plink --bfile {} --make-bed --out {}\".format(input_prefix, no_dups_plink_prefix)\n\n plink_stdout = run_shell_cmd(rm_dups_cmd)\n\n return no_dups_plink_prefix, plink_stdout",
"def compress_dups(data, column):\n idx = defaultdict(list)\n for row in data:\n idx[row[column]].append(row)\n\n dedup = []\n\n for idx_row in sorted(idx.items()):\n dedup.append(avg_rows(idx_row[1]))\n return dedup",
"def get_concatenated_csv_data(concatenated_filepath, concatenated_filename, device_id, output_create_files_filepath, output_create_files_filename):\n\n # Create the full file name of the concatenated filename.\n concatenated_file = concatenated_filepath + \"/\" + concatenated_filename + \"_concatenated.csv\"\n print(\"Looking for concatenated file name: \", concatenated_file)\n\n # Test if the concatenated file exists and if it does, return it.\n if os.path.isfile(concatenated_file):\n print(\"Concatenated file exists: \", concatenated_file)\n return concatenated_file\n\n # If it does not exist, test if the individual files exist.\n elif not os.path.isfile(concatenated_file):\n print(\"Concatenated file does not exist. Create file: \", concatenated_file)\n file_list = get_data_from_files(concatenated_filepath, concatenated_filename)\n # print(\"File list:\", file_list)\n\n # If the individual files exist, create the concatenated file.\n if len(file_list) > 0:\n print(\"Individual csv files exist. Creating the concatenated file.\")\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file\n\n # If the individual files do not exist, get the data from the database, create the files then concatenate them.\n else:\n database_query = \"select * from ship_data_gpggagpsfix where device_id=\" + int(\n device_id) + \" order by date_time;\"\n # print(database_query)\n password = input()\n\n db_connection = MySQLdb.connect(host='localhost', user='ace', passwd=password, db='ace2016', port=3306);\n\n track_df = get_data_from_database(database_query, db_connection)\n track_df = string_to_datetime(track_df)\n\n # Output the data into daily files (as they do not already exist).\n output_daily_files(track_df, output_create_files_filepath, output_create_files_filename)\n\n concatenated_file = create_concatenated_csvfile(concatenated_filepath, concatenated_filename)\n return concatenated_file",
"def add_companies_to_csv(companies, filename):\n\n df_add = create_company_df(companies)\n df_old = pd.read_csv(filename)\n frames = [df_old, df_add]\n df = pd.concat(frames)\n df = df.drop_duplicates()\n\n df.to_csv(filename, index=False)",
"def merge_csv_initial(output_filename, path):\n\n prefix = ['ParticipantID',\n 'igtb.datatime',\n 'igtb.timezone']\n\n names = ['irb',\n 'itp',\n 'ocb',\n 'inter.deviance',\n 'org.deviance',\n 'shipley.abs',\n 'shipley.vocab',\n 'neuroticism',\n 'conscientiousness',\n 'extraversion',\n 'agreeableness',\n 'openness',\n 'pos.affect',\n 'neg.affect',\n 'stai.trait',\n 'audit',\n 'gats.quantity',\n 'ipaq',\n 'psqi',\n 'gats.status']\n\n\n \n\n #b = np.loadtxt(path + names[0] + '.csv', delimiter=\",\", skiprows=1, usecols=(0, 1, 2), dtype=object)\n #a = np.array(b, dtype=object)\n\n for i,n in enumerate(names):\n file = path + n + '.csv'\n if(i==0):\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,1,2,3]) \n df_all = df\n else:\n df = pd.read_csv(file, sep=',', index_col=0,usecols=[0,3]) \n df_all=pd.concat([df_all,df],axis=1)\n \n df_all=df_all.reset_index() \n a = df_all.as_matrix()\n\n # column_format = '%20s %10s %10s %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f %f'\n # column_format = '%20s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s %10s'\n column_format = '%20s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s, %10s'\n names_string = ','.join(prefix + names)\n\n print(a.shape)\n\n np.savetxt(output_filename, a, delimiter=\",\", fmt=column_format, comments='', header=names_string)\n\n return output_filename",
"def main(in_path, keep_path, out_path):\n\t# First open the input csv\n\tcsv_hndl = lambda x: np.array([np.array(r) for r in x])\n\tdata, headers = read_csv(in_path, csv_hndl, use_headers=True, delimiter=',')\n\n\t# Read headers to keep\n\tkeeps = []\n\n\t# Regex for ignoring comments\n\tcmnt_re = re.compile(\"^#\")\n\n\t# Open and read the file\n\twith open(keep_path) as f_obj:\n\t\tfor line in f_obj:\n\t\t\tline = line.strip()\n\t\t\t# If line is commented out, ignore\n\t\t\tif cmnt_re.match(line):\n\t\t\t\tcontinue\n\t\t\t# Otherwise add to list of keeps\n\t\t\tkeeps.append(line)\n\n\t# Prune the csv\n\tnew_data, new_headers = prune_csv(data,headers,keeps)\n\n\t# Write to output csv file\n\twrite_csv(\n\t\tout_path, \n\t\tnew_data, \n\t\tnew_headers, \n\t\tdelimiter=',', \n\t\tquotechar='\"',\n\t\tquoting=csv.QUOTE_MINIMAL\n\t)",
"def append_to_csv(df, csvFilePath, sep=\",\", supersede=False):\n\n if (not os.path.isfile(csvFilePath)) or supersede==True:\n df.to_csv(csvFilePath, index=False, sep=sep)\n\n else:\n d_od=df.columns\n f_od=pd.read_csv(csvFilePath,nrows=0,sep=sep).columns\n if np.setxor1d(d_od,f_od).size:\n raise Exception(\"Columns do not match: Dataframe columns are: \",\n d_od, \". CSV file columns are: \", f_od, \".\")\n\n else:\n df[f_od].to_csv(csvFilePath, mode='a', index=False, sep=sep, header=False)",
"def generate_filtered_csv_file(file_path, rows_id):\n\n data = pandas.read_csv(file_path)\n\n df = pandas.DataFrame(data)\n\n filtered_data = df.loc[set(rows_id)]\n\n new_file_path = new_path_generator(file_path)\n\n filtered_data.to_csv(new_file_path, index=False, header=True)\n\n LOGGER.info('New file path: %s', new_file_path)\n\n return new_file_path",
"def isolate_subreddit(csv_location, subreddit):\r\n\r\n individual_subreddit_csvs = csv_location + \"_\" + subreddit + '.*.csv'\r\n\r\n df = dd.read_csv(csv_location + \".csv\", header=0, sep='\\t')\r\n sub_df = df.loc[df['subreddit'] == subreddit]\r\n\r\n sub_df.to_csv(individual_subreddit_csvs)\r\n filenames = glob(individual_subreddit_csvs)\r\n with open(csv_location + \"_\" + subreddit + '.csv', 'w') as out:\r\n for fn in filenames:\r\n with open(fn) as f:\r\n out.write(f.read())\r\n os.remove(fn)",
"def pre_process_multispace(filepath, delimiter=\" \"):\n newpath = filepath+\".rev.csv\"\n with open(filepath, \"r\") as src_csv_file:\n with open(newpath, \"w\") as dst_csv_file:\n for src_line in src_csv_file:\n dst_csv_file.write(delimiter.join(src_line.split())+\"\\n\")"
] | [
"0.7729395",
"0.6743207",
"0.6410341",
"0.6338948",
"0.6320352",
"0.6265429",
"0.611104",
"0.60951626",
"0.6093322",
"0.5940891",
"0.5861167",
"0.57967466",
"0.57279533",
"0.5659171",
"0.5623228",
"0.5598873",
"0.5594488",
"0.5593631",
"0.5475531",
"0.54566574",
"0.5449391",
"0.5412171",
"0.5388504",
"0.536355",
"0.5355186",
"0.53284985",
"0.5304311",
"0.5293666",
"0.5266408",
"0.525995"
] | 0.78255796 | 0 |
This function checks the size of a dataframe and splits it into parts containing approximately 1 million records each by default. It also provides the option of writing the split dataframes to disk. | def dataFrameSplit(df, norec=1000000, outfile=None):
    # calculation of the no. of rows of the dataframe
    df_rsz = len(df.index)
    if df_rsz > norec:
        no_splits = np.ceil(df_rsz/norec)
        dfarr = np.array_split(df, no_splits)
        # optionally write each split to disk, using outfile as the path prefix
        if outfile != None:
            i = 0
            for arr in dfarr:
                arr.to_csv(outfile+str(i+1)+".csv", encoding='utf-8', index=False,
                           header=False)
                i = i+1
        return dfarr
    else:
        print("The dataframe doesn't have sufficient records") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_dataframe(df, size=10*1024*1024):\n \n # size of each row\n row_size = df.memory_usage().sum() / len(df)\n # maximum number of rows in each segment\n row_limit = int(size // row_size)\n # number of segments\n seg_num = (len(df)+row_limit-1)//row_limit\n # split df into segments\n segments = [df.iloc[i*row_limit : (i+1)*row_limit] for i in range(seg_num)]\n\n return segments",
"def test_03_dataframe_to_dataframe_w_chunksize(self):\n _, err = _iquery(\"store(flatten(DF1, cells_per_chunk:5), DF3)\")\n assert not err, err\n self._array_cleanups.append('DF3')\n check_v_sum('DF3')\n nchunks = chunk_count(vaid_of('DF3'))\n prt(\"DF3 has\", nchunks, \"chunks\")\n assert nchunks < self._df1_chunks, \"DF3 did not get dense!\"",
"def split_set(dataframe, test_size):\n i = np.floor(len(dataframe)*test_size).astype(int)\n set_a = dataframe[0:i].reset_index()\n set_b = dataframe[i:].reset_index()\n return set_a, set_b",
"def splitting_df(dataframe):\n dataframe = dataframe.dropna()\n index = 100\n train_set = dataframe.iloc[:index]\n test_set = dataframe.iloc[index:]\n return train_set, test_set, dataframe",
"def return_size(df):\n return round(sys.getsizeof(df) / 1e9, 2)",
"def split_data(df, test_size): \n\n X_train, X_test, y_train, y_test = train_test_split(df[[\"description_processed\", \"transaction_type\", \"transaction_account_type\"]],\n df['transaction_class'],\n test_size=test_size,\n shuffle=True,\n random_state=42)\n \n return X_train, X_test, y_train, y_test",
"def uploade_how_many_rows_we_want(self, df):\r\n try:\r\n if len(df) > 300000 or df.memory_usage(deep=True).sum() > self.memory:\r\n raise Exception(\"batch request\")\r\n try:\r\n self.insert(df)\r\n \r\n except Exception as ex:\r\n if 'string contains an untranslatable character' in str(ex):\r\n for i in np.where(df.dtypes != np.float)[0]:\r\n df['drop'] = df[df.columns[i]].apply(lambda x: self.is_ascii(x))\r\n l_tmp = (df['drop'][df['drop']].index)\r\n if len(l_tmp) > 0:\r\n print(\"rows remove: \" + str(list(l_tmp)))\r\n df.drop(l_tmp, inplace=True)\r\n df.drop('drop', axis=1, inplace=True)\r\n elif 'batch request' in str(ex) or 'LAN message' in str(ex):\r\n raise Exception(\"batch request\")\r\n else:\r\n print('error')\r\n print(ex)\r\n raise error\r\n self.rows += len(df)\r\n\r\n\r\n except Exception as ex:\r\n if \"batch request\" in str(ex):\r\n \r\n # split the data to 2 dataframes\r\n len_data = math.ceil(len(df)/2)\r\n df1 = df.iloc[:len_data]\r\n df2 = df.iloc[len_data:]\r\n\r\n self.uploade_how_many_rows_we_want(df1)\r\n self.uploade_how_many_rows_we_want(df2)\r\n\r\n\r\n else:\r\n print (ex)\r\n raise error",
"def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = 
subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM",
"def split_df(df, n_chunks):\n chunk_size = int(np.ceil(df.shape[0] / n_chunks))\n assert n_chunks * chunk_size >= df.shape[0]\n chunks = []\n for i in range(0, df.shape[0], chunk_size):\n chunks.append(df[i:i + chunk_size])\n assert len(chunks) == n_chunks\n return chunks",
"def split(self):\n \n spl = self.which('split')\n if spl:\n self.__tmp = \"/tmp\"\n self.__tmpout = \"/tmp/output\"\n if not os.path.exists(self.__tmpout):\n os.makedirs(self.__tmpout)\n #os.chdir(\"/tmp\")\n '''\n assume split prog overwrites existing files if\n there is a conflict in file names\n '''\n #thecommand = \"%s -a 3 -b 500k %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n thecommand = \"%s -a 3 -b 10m %s %s/%s\" % (spl, self.__filename, self.__tmpout, self.__filename + self.__postfix)\n os.system(thecommand)\n dirList=os.listdir(self.__tmpout)\n #self.constructCat(dirList)\n for chunkfilename in dirList:\n #print chunkfilename \n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n #print self.__cat\n self.__flist.append(self.__tmpout + \"/\" + chunkfilename)\n #print self.__flist\n self.writeLog(chunkfilename, self.md5(fileName=self.__tmpout + \"/\" + chunkfilename))\n self.__numchunks = len([item for item in os.listdir(self.__tmpout) if os.path.isfile(self.__tmpout + \"/\" + item)])\n else:\n try:\n f = open(self.__filename, 'rb')\n except (OSError, IOError), e:\n raise FileSplitterException, str(e)\n \n bname = (os.path.split(self.__filename))[1]\n # Get the file size\n fsize = os.path.getsize(self.__filename)\n # dynamically calculate number of chunks\n strfsize = str(fsize)\n '''\n in MB's\n 8 - teens\n 9 - hundreds\n 10 - gigabytes\n '''\n if len(strfsize) == 8:\n #self.__numchunks = fsize/100000\n self.__numchunks = fsize/50000\n elif len(strfsize) == 9:\n #self.__numchunks = fsize/1000000\n self.__numchunks = fsize/500000\n elif len(strfsize) == 10:\n #self.__numchunks = fsize/10000000\n self.__numchunks = fsize/5000000\n #print '\\nSplitting file %s into %d chunks' % (self.__filename, self.__numchunks)\n # Get size of each chunk\n self.__chunksize = int(float(fsize)/float(self.__numchunks))\n \n chunksz = self.__chunksize\n total_bytes = 0\n \n for x in range(self.__numchunks):\n #chunkfilename = bname + '-' + str(x+1) + self.__postfix\n chunkfilename = bname + ('-%03d' % (x+1)) + self.__postfix\n # kill residual file if it exists\n if os.path.exists(chunkfilename):\n os.remove(chunkfilename)\n \"\"\"\n if reading the last section, calculate correct\n chunk size.\n \"\"\"\n if x == self.__numchunks - 1:\n chunksz = fsize - total_bytes\n \n try:\n if self.__debug:\n print 'Writing file chunk: %s' % chunkfilename\n data = f.read(chunksz)\n total_bytes += len(data)\n chunkf = file(chunkfilename, 'wb')\n chunkf.write(data)\n chunkf.close()\n #self.__cat += self.__remotepath + \"/\" + chunkfilename + \" \"\n self.__flist.append(chunkfilename)\n self.writeLog(chunkfilename, self.md5(fileName=chunkfilename))\n except (OSError, IOError), e:\n print e\n continue\n except EOFError, e:\n print e\n break\n\n print '\\nSplit complete on file: %s into %d chunks\\n' % (self.__filename, self.__numchunks)\n self.__logfhandle.close()\n #self.__cat += \"> \" + self.__remotepath + \"/\" + self.__filename\n self.set_cat_statement()",
"def df2db_separate(self, df: pd.DataFrame, tab_name):\n self.execute(\"set hive.execution.engine = tez\")\n self.execute(\"set tez.queue.name = sephora_internal\")\n self.execute(\"drop table if exists {table_name}\".format(table_name=tab_name))\n\n max_df_size = 50000\n\n dfs = df_split(df, batch_size=max_df_size)\n num_piece = len(dfs)\n\n dfs[0].to_sql(tab_name, self.engine, method='multi', index=False)\n if num_piece > 1:\n for pdf in dfs[1:]:\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))\n pdf.to_sql(tab_name + '_tmp', self.engine, method='multi', index=False)\n self.execute(\"INSERT INTO TABLE {tn} SELECT * FROM {tt}\".format(\n tn=tab_name, tt=tab_name + '_tmp'\n ))\n print(len(pdf))\n self.execute(\"DROP TABLE IF EXISTS {tt}\".format(tt=tab_name + '_tmp'))",
"def split_on_whole_table(\n df: pyspark.DataFrame,\n ) -> pyspark.DataFrame:\n return df",
"def split_data(input_df, output_df):\n return train_test_split(input_df, output_df, test_size=0.2, random_state=42,\n stratify=output_df)",
"def getSplits(df, train_size, val_size, test_size, seed=None):\n size = len(df)\n\n # size is considered a percentage if less than 1:\n train_size = int(train_size * size) if train_size < 1 else train_size\n val_size = int(val_size * size) if val_size < 1 else val_size\n test_size = int(test_size * size) if test_size < 1 else test_size\n\n if not seed is None:\n np.random.seed(seed)\n\n train_val_idx = np.random.choice(\n a=range(size),\n size=train_size + val_size,\n replace=False\n )\n train_idx = train_val_idx[:train_size]\n val_idx = train_val_idx[train_size:]\n\n train = df.iloc[train_idx]\n val = df.iloc[val_idx]\n test = df.drop(train.index).drop(val.index) # test is equal to the leftover\n\n assert len(train) + len(val) + len(test) == len(df)\n\n return train, val, test",
"def calculateChunkSize(size, record_count, splits):\n avg_record_size = size / record_count\n logging.info(\n \"Avg record size: %0.02f=%d/%d\" %\n (avg_record_size, size, record_count))\n chunk = floor(ceil(size / (splits * avg_record_size)) * avg_record_size)\n\n logging.info(\n \"Setting chunk to: %d=floor(ceil(%d/(%d*%0.02f))*%0.02d)\" %\n (chunk, size, splits, avg_record_size, avg_record_size))\n return chunk",
"def make_bedfiles():\n df = pd.read_csv(\"%s.length\" % ref, sep='\\t', header=None)\n thresh = math.ceil(sum(df[1]) / globals()['jobs_per_pool'])\n lines = []\n fcount = 0\n fsum = 0\n for count,row in enumerate(df.index):\n contig, length = list(df.loc[row, :])\n fsum += length\n lines.append([contig, str(length)])\n if fsum >= thresh or count + 1 == len(df.index):\n make_bedfile(lines, fcount)\n lines = []\n fcount += 1\n fsum = 0\n return fcount",
"def split_df(df,\n test_size=.10,\n random_state=42):\n train_df, test_df = train_test_split(df,\n test_size=test_size,\n random_state=random_state)\n return train_df, test_df",
"def test_split(range_size, partition_size):\n dump = Mock()\n\n iterable = list(range(range_size))\n\n list(_split(partition_size=partition_size, dump=dump, iterable=iterable))\n expected_call_count = (range_size // partition_size) + int(bool(range_size % partition_size))\n\n assert dump.call_count == expected_call_count",
"def chunk_data(path, chunksize):\n reader = pandas.read_table(path, chunksize=chunksize, skiprows=0)\n\n start = 0\n for chunk in reader:\n stop = start + len(chunk) - 1\n dataframe_to_csv(chunk, file=get_chunk_file_name(path, (start, stop)))\n start = stop + 1\n\n return alphabetize_chunk_files(os.path.basename(path))",
"def make_files_for_cases(size):\n case_counts = get_case_counts_for_primary_sites()\n for primary_site in case_counts:\n print(\"one done\")\n if case_counts[primary_site] >= size:\n temp_file = get_all_cases_from_primary_site(primary_site)\n if len(temp_file) >= size:\n df = pd.DataFrame(temp_file, columns = [\"primary_site\",\"case_uuid\", \"rna_seq_uuid\"])\n df.to_csv(\"data/\" + primary_site + \"_case_rna_uuids.csv\", sep = \",\")\n return",
"def get_test_data_df(X,size: int = 1): \n num_rows = len(X)\n test_df = X.copy()\n\n while num_rows < size:\n test_df = test_df.append(test_df)\n num_rows = len(test_df)\n\n return test_df[:size].reset_index(drop = True)",
"def split_dataframe(df:\"pandas.DataFrame, pandas.Series\", sections:\"int\"=5, drop_index:\"bool\"=True, output:\"str\"=\"dataframe\")-> \"None or pandas.DataFrame\":\n import numpy as np\n from IPython.display import display_html\n \n if sections <= 0:\n raise ValueError('number sections must be larger than 0.')\n \n ### Find out how to keep column names when dropindex=True\n ### if series, dont allow drop index?\n \n ### allow passing in of desired column names as an array of strings (will result\n ### in dup col names but it won't matter if its only being displayed and not used in calculations)\n\n if isinstance(df, pandas.Series):\n df = df.to_frame()\n \n if drop_index:\n df.reset_index(drop = True, inplace=True)\n else:\n df.reset_index(level=0, inplace=True)\n\n df_split = np.array_split(df, sections)\n num_rows = [column.shape[0] for column in df_split]\n \n if output == \"dataframe\":\n \n alldata = [column.values.tolist() for column in df_split]\n \n # Add empty rows to each DataFrame until all DataFrames have the same number of rows\n for i in range(len(alldata)):\n while len(alldata[i]) < max(num_rows):\n alldata[i].append([\"\"]*df.shape[1])\n\n # Create rows of values across all of the DataFrames in alldata\n # When each entire row is created, add it to the output DataFrame\n dataframe = [] # <-- Output DataFrame\n for row_index in range(max(num_rows)):\n across_row = []\n for dataf in alldata:\n across_row.extend(dataf[row_index])\n dataframe.extend([across_row])\n \n return pandas.DataFrame(data=dataframe)\n \n if output == \"html\":\n strHtml = ''\n for x in split_dataframe:\n strHtml += x.to_html()\n display_html(strHtml.replace('table','table style=\"display:inline\"'), raw=True)",
"def readDBchunks(self, tablename, orderField, chunksize=50000,\n selectOptions=None, limit=None, filterOptions=None, verbose=False):\n\n if limit:\n remaining = limit\n next_chunk = min(remaining, chunksize)\n else:\n next_chunk = chunksize\n\n cont = 0\n \n selectOptions = selectOptions + ', ' + orderField\n\n df = self.readDBtable(tablename, limit=next_chunk, selectOptions=selectOptions,\n filterOptions = filterOptions, orderOptions=orderField)\n\n while (len(df)):\n cont = cont+len(df)\n if verbose:\n print('[DBManager (readDBchunks)] Number of rows read so far:', cont)\n if limit:\n remaining = limit - cont\n next_chunk = min(remaining, chunksize)\n else:\n next_chunk = chunksize\n yield df.iloc[:,:-1]\n\n #Next we need to start from last retrieved element\n filtercondition = orderField + '>' + str(df.iloc[:,-1][len(df)-1])\n if filterOptions:\n filtercondition = filtercondition + ' AND ' + filterOptions\n \n if next_chunk>0:\n df = self.readDBtable(tablename, limit=next_chunk, selectOptions=selectOptions,\n filterOptions = filtercondition, orderOptions=orderField)\n else:\n #If maximum number of records has been reached, set df to empty list to exit\n df = []",
"def mixed_divide_by_events_lenght(data_df:pd.DataFrame, path_column, sizes_filename=None):\n sizes = None\n if sizes_filename is not None:\n if os.path.exists(sizes_filename):\n with open(sizes_filename, 'rb') as sizes_handler:\n sizes = pickle.load(sizes_handler)\n if sizes is None:\n sizes = dict()\n aux = 0\n for index, row in data_df.iterrows():\n sys.stderr.write('\\rdone {0:%}'.format(aux / len(data_df)))\n with open(row[path_column], 'rb') as file_handler:\n try:\n values = pickle.load(file_handler)\n except Exception as e:\n print(row[path_column])\n print(\"test\")\n print(e)\n raise ValueError()\n if len(values) not in sizes.keys():\n sizes[len(values)] = []\n sizes[len(values)].append(row['episode'])\n aux += 1\n if sizes_filename is not None:\n with open(sizes_filename, 'wb') as sizes_handler:\n pickle.dump(sizes, sizes_handler)\n return sizes",
"def split_file(self, input_file):\r\n file_list = [] \r\n with open(input_file, 'r', encoding='GB18030', errors='ignore') as f_in:\r\n data = f_in.readlines()\r\n lines_num = len(data)\r\n size = lines_num // self.num_workers # lines splitted in a chunk\r\n start = 0\r\n end = size\r\n w_path = \"../data/\"\r\n for i in range(lines_num//size):\r\n chunk_name = \"chunk_\" + str(i) + \".dat\"\r\n with open(w_path + chunk_name, 'w', encoding='utf-8') as f_out:\r\n f_out.write(''.join(data[start:end]))\r\n start = start + size\r\n end = end + size\r\n file_list.append(\"../data/chunk_\" + str(i) + \".dat\")\r\n \r\n print(f\"File splitted into {self.num_workers} chunks.\")\r\n return file_list, size",
"def train_test_split(df, test_size=0.3):\r\n # split df here\r\n train_size = int(df.shape[0] * (1 - test_size))\r\n test_size = df.shape[0] - train_size\r\n train = df[:train_size]\r\n test = df[train_size:]\r\n\r\n return train, test # return the train and test datasets\r",
"def to_chunked_dataframe(\n self, max_chunk_size: int = -1, timeout_sec: int = DEFAULT_TIMEOUT_SEC\n ) -> pd.DataFrame:\n # Max chunk size defined by user\n records = []\n for result in self.result(timeout_sec=timeout_sec):\n result.append(records)\n if len(records) == max_chunk_size:\n df = pd.DataFrame.from_records(records)\n records.clear() # Empty records array\n yield df\n\n # Handle for last chunk that is < max_chunk_size\n if not records:\n yield pd.DataFrame.from_records(records)",
"def readFilesIntoDataFrame(nameTemplate, numOfFiles):\n #https://www.kaggle.com/arjanso/reducing-dataframe-memory-size-by-65\n\n list_of_dfs = []\n for i in range(numOfFiles):\n print ('Processing {0} out of {1} files'.format(i, numOfFiles))\n\n fileToProcess = fileLocation + nameTemplate.format(i)\n print 'fileToProcess=', fileToProcess\n \n if 'feather' in nameTemplate:\n read_df = feather.read_feather(fileToProcess)\n elif 'parquet' in nameTemplate:\n read_df = pd.read_parquet(fileToProcess)\n else:\n print 'This should not happen, nameTemplate is wrong, please check it is in parquet or feather format or that the template correctly describes the existing files, exiting...'\n sys.exit(1)\n\n print read_df.info(memory_usage='deep')\n print '-'*50\n print read_df.describe()\n list_of_dfs.append(read_df)\n \n print 'Start concatenating dataframes, it may take some time'\n comb_df = pd.concat(list_of_dfs, ignore_index=True)\n return comb_df",
"def large_xarray_to_multi_parquet(xdf,float_id):\n for lvl in tqdm(xdf.N_PARAM.values.tolist()):\n df = xdf.sel(N_PARAM=lvl).to_dataframe()\n df = df.reset_index()\n df = process_chunked_df(df,float_id)\n df.to_parquet(f\"temp/{str(lvl)}.parquet\",use_deprecated_int96_timestamps=True)",
"def split_data(df):\n\trandom_seed = 1\n\tdf_train = df.sample(frac=0.8, random_state=random_seed)\n\tdf_rem = df.loc[~df.index.isin(df_train.index)]\n\tdf_valid = df_rem.sample(frac=0.5, random_state=random_seed)\n\tdf_test = df_rem.loc[~df_rem.index.isin(df_valid.index)]\n\tlogger.info(\"Shape of training dataframe: \" + str(df_train.shape))\n\tlogger.info(\"Shape of validation dataframe: \" + str(df_valid.shape))\n\tlogger.info(\"Sahpe of test dataframe: \" + str(df_test.shape))\n\n\treturn df_train, df_valid, df_test"
] | [
"0.73547655",
"0.67736286",
"0.6035461",
"0.59178704",
"0.5877275",
"0.58713686",
"0.584475",
"0.5791403",
"0.57895786",
"0.5737366",
"0.57224447",
"0.57164466",
"0.5714758",
"0.57108814",
"0.5700032",
"0.5689146",
"0.56854934",
"0.5663078",
"0.5655827",
"0.5648846",
"0.5632305",
"0.56116503",
"0.5605787",
"0.5585047",
"0.5555051",
"0.55461866",
"0.55366606",
"0.553417",
"0.55228126",
"0.55179846"
] | 0.70206773 | 1 |
Embed words in a sequence using GloVe model | def __glove_embed__(sequence, model):
embedded = []
for word in sequence:
embedded.append(model[word])
return embedded | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def embed(self, sequence):\n words = sequence.split(' ')\n vecs = [self._E[self._w2i[i]] if i in self._w2i else self._E[self._w2i[\"UNK\"]]\n for i in words]\n return vecs",
"def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]",
"def generateByWord(model, voc, maxlen=20, diversity=0.5, numwords=42):\n\n text, sym_indices, indices_sym = voc\n syms = set(text)\n start_index = random.randint(0, len(text) - maxlen - 1) \n generated = ''\n sentence = text[start_index: start_index + maxlen]\n \n #generated += sentence\n generated += ' '.join(sentence)\n print('----- Generating with seed: \"' + ' '.join(sentence) + '\"')\n sys.stdout.write(generated)\n\n for i in range(numwords):\n x = np.zeros((1, maxlen, len(syms)))\n for t, sym in enumerate(sentence):\n x[0, t, sym_indices[sym]] = 1.\n \n preds = model.predict(x, verbose=0)[0]\n next_index = sample(preds, diversity)\n next_sym = indices_sym[next_index]\n generated += ' '+next_sym\n sentence.append(next_sym)\n tmpsentence = sentence[1:]\n sentence = tmpsentence\n sys.stdout.write(next_sym+' ')\n sys.stdout.flush()\n print()",
"def glove_embedding(self, texts, file):\n self.embedding_dict = dict()\n glove_file = open(file, encoding='utf-8')\n for line in glove_file:\n word_vector = line.split()\n word = word_vector[0]\n word_vector_arr = np.asarray(word_vector[1:], dtype='float32')\n self.embedding_dict[word] = word_vector_arr\n glove_file.close()\n \n i = 0\n with pgb.ProgressBar(max_value=len(texts)) as bar:\n for text in texts:\n vec = []\n text = text.split()\n for t in text:\n try:\n vec.append(self.embedding_dict[t.lower()])\n except KeyError:\n pass\n ## There are no matched words\n if len(vec) == 0:\n print(\"len 0 vec\")\n self.word_vec.append(np.zeros((100)))\n else:\n #print(np.array(vec))\n #print(np.array(vec).shape)\n sentence = self.sentence_vec(np.array(vec))\n #print(sentence)\n #print(sentence.shape)\n self.word_vec.append(sentence)\n i += 1\n bar.update(i)\n self.word_vec = np.array(self.word_vec)\n print(self.word_vec.shape)",
"def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")",
"def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)",
"def embed(text: str) -> np.ndarray:\n n = nlp(text)\n return n.vector",
"def input_new_phrase(self, text):\n \n x_new_tokens = [word_idx[word] for word in text.split()]\n \n pred = self.model.predict(np.array([x_new_tokens]))\n pred = np.argmax(pred, axis=-1)[0]\n \n return [[word_list[w], tags[pred]] for (w, pred) in zip(range(len(x_new)), pred)]",
"def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding",
"def embed(raw_seq, index_dict):\n return np.asarray([index_dict[word.lower()]\n if word.lower() in index_dict\n else index_dict[OOV_TOKEN] for word in raw_seq])",
"def build_seq_embeddings(self):\n with tf.variable_scope(\"seq_embedding\"), tf.device(\"/cpu:0\"):\n embedding_map = tf.get_variable(\n name=\"map\",\n shape=[self.config.vocab_size, self.config.word_embedding_size],\n initializer=self.initializer)\n \n # We need to store the normalized lookup table for efficient mapping of embedding vectors to closest words\n self.normed_embedding_map = tf.nn.l2_normalize(embedding_map, dim=1)\n \n seq_embeddings = tf.nn.embedding_lookup(embedding_map, self.input_seqs) \n # seq_embeddings has the shape (batch_size, sequence_length, sentence_length, embedding_size)\n # meaning, for each index in input_seqs (with shape (batch_size, sequence_length, sentence_length)) it stores an embedding vector\n\n #print('Shape seq_embeddings: ' + str(seq_embeddings.get_shape()))\n\n self.seq_embeddings = seq_embeddings",
"def generate_text(model, w2vmodel, nb_epoch, length=75, max_seq_length=20, seed=\"Rain drop drop top\"):\n global sample\n generated = ''\n sequences = seed\n\n generated += seed\n\n #clean seed\n seed=re.sub(r'<[^<]+?>', '', seed)\n #remove encoding characters like \\x86\n seed=re.sub(r'[^\\x00-\\x7f]','',seed)\n seed=re.sub(r'\\#','',seed)\n #remove punctuation\n seed=re.sub(r'[^A-Za-z0-9\\s]','',seed)\n\n #shorten if longer than max_seq_length\n seed = seed.split(' ')[:max_seq_length]\n\n word_ix_list = []\n for word in seed:\n try:\n word = word_to_ix(word,w2vmodel)\n except:\n #since we're using -1 as a null word (why we also pad with the not in vocab index), we'll use that for words that aren't in the word2vec model\n print('Warning: {0} not contained in training vocabulary. It will be ignored when computing output.'.format(word))\n word = word_to_ix('_UNSEEN_',w2vmodel)\n word_ix_list.append(word)\n\n #pad word_list with the unseen word2vec if shorter than max_seq_length\n word_ix_list = [word_to_ix('_UNSEEN_',w2vmodel)] * (max_seq_length-len(word_ix_list)) + word_ix_list\n\n for temp in [0.2, 0.5, .75, 1.0]:\n print('temperature: ', temp)\n for word in range(length):\n #reshape wordlist\n word_ix_list = np.asarray(word_ix_list).reshape(1,max_seq_length)\n\n #prediction = model.predict(x=word_ix_list)\n #next_ix = np.argmax(prediction)\n prediction = model.predict(x=word_ix_list,verbose=0)[0]\n next_ix = sample(prediction, temp)\n predicted_word = ix_to_word(next_ix,w2vmodel)\n\n generated += (' ' + predicted_word) #add predicted word to the generated output\n\n #remove first word from the word list to reduce the array for the max sequence length for the model\n word_ix_list = np.append(word_ix_list,next_ix)\n word_ix_list.shape\n word_ix_list = np.delete(word_ix_list,0,0)\n print(generated)\n print('-----')\n #print(generated)\n return",
"def generate_sentence(model, opener_words):\n\n sentence=[]\n #sentences between 3 and 15 words\n length= random.randint(3,6)\n keys=model.keys()\n bigram=random.choice(list(keys))\n\n #choose a first word that can be a starter word\n while bigram[0] not in opener_words:\n bigram=random.choice(list(keys))\n #iterate until sentence is correct length\n for i in range(0,length):\n matches=[]\n found=False\n while not found:\n\n #search in keys for key[0] to match the bigram[1]\n for key in keys:\n regex=re.compile(r\"\\b%s\\b\"%bigram[1])\n result=regex.match(key[0])\n if result:\n matches.append(key)\n found=True\n if not found:\n matches=[]\n i=0\n bigram=random.choice(list(keys))\n sentence.pop()\n\n #add first member of bigram to sentence list\n sentence.append(bigram[1])\n #choose next bigram from the list of matches\n bigram=random.choice(matches)\n\n #combine strings from list\n return \" \".join(sentence)",
"def Emojify_V2(input_shape, word_to_vec_map, word_to_index):\n \n ### START CODE HERE ###\n # Define sentence_indices as the input of the graph.\n # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).\n sentence_indices = Input(shape = input_shape, dtype = 'int32')\n \n # Create the embedding layer pretrained with GloVe Vectors (≈1 line)\n # def pretrained_embedding_layer(word_to_vec_map, word_to_index): # return embedding_layer\n embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)\n \n # Propagate sentence_indices through your embedding layer\n # (See additional hints in the instructions).\n embeddings = embedding_layer(sentence_indices) \n \n # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state\n # The returned output should be a batch of sequences.\n X = LSTM(units = 128, return_sequences = True)(embeddings)\n # Add dropout with a probability of 0.5\n X = Dropout(rate = 0.5)(X)\n # Propagate X trough another LSTM layer with 128-dimensional hidden state\n # The returned output should be a single hidden state, not a batch of sequences.\n X = LSTM(units = 128, return_sequences = False)(X)\n # Add dropout with a probability of 0.5\n X = Dropout(rate = 0.5)(X) \n # Propagate X through a Dense layer with 5 units\n X = Dense(units = 5)(X)\n # Add a softmax activation\n X = Activation(activation = 'softmax')(X)\n \n # Create Model instance which converts sentence_indices into X.\n model = Model(inputs = sentence_indices, outputs = X)\n \n ### END CODE HERE ###\n \n return model",
"def generate_words_greedily(self, model, session, X, words_to_idx):\n \n Xorig_clean = self.cleanOutput(X, words_to_idx)\n \n for i in range(len(X)):#iterate over allscentences\n #set eos pointer to eos index\n p_eos = np.argwhere(np.array(X[i])==words_to_idx['<eos>'])[0][0] # 2 is eos but would be better using the dict\n while True:\n #compute predictions\n feed_dict = {self.input_x: np.array(X[i]).reshape((1,29)),\n self.input_y: np.array(X[i]).reshape((1,29))} # input_y is not needed\n \n prediction, sentence_probability = session.run([self.predictions, self.sentence_probability], feed_dict)\n \n lastpred = prediction[0,p_eos-1]\n X[i][p_eos]=lastpred\n \n p_eos += 1\n if lastpred == words_to_idx['<eos>'] or p_eos==29: break\n \n #postprocess X\n Xclean = self.cleanOutput(X, words_to_idx)\n self.create_submission_file(Xorig_clean, task='originalX')\n self.create_submission_file(Xclean, task='continuation')",
"def glove_embedding(self, word_index, padded, embedding_matrix, part='q'):\n print(\"*\" * 50, \"Start Glove embedding process\", \"*\" * 50)\n start_time = time()\n\n\n MAX_SEQ_LEN = None\n if part == 'q':\n MAX_SEQ_LEN = self.MAX_Q_SEQ_LEN\n elif part == 'a':\n MAX_SEQ_LEN = self.MAX_A_SEQ_LEN\n else:\n print(f\"Please indicate you want embedding question part or answer part\")\n\n\n input_layer = Input(shape=(MAX_SEQ_LEN,), dtype='int32')\n embedding_layer = Embedding(input_dim = len(word_index) + 1,\n output_dim = self.EMBEDDING_DIM,\n weights=[embedding_matrix],\n input_length=MAX_SEQ_LEN,\n trainable=False)(input_layer)\n # (number of sample, MAX_SEQ_LEN, EMBEDING_DIM)\n model = Model(inputs=input_layer, outputs=embedding_layer)\n model.compile('rmsprop', 'mse')\n output_array = model.predict(padded)\n\n cost_time = round((time() - start_time), 4)\n print(\"*\" * 40, \"End Glove embedding() with {} seconds\".format(cost_time), \"*\" * 40, end='\\n\\n')\n\n return output_array, embedding_layer",
"def forward(self, doc):\n out = torch.tensor([]).float().to(self.device)\n\n for i in range(len(doc)):\n sentences_raw = sentencesplit(cleantxt(doc[i]))\n sentences_ready = torch.tensor([]).float().to(self.device)\n for sentence in sentences_raw:\n sentence = sentence.split()\n if sentence == []:\n continue\n lookup_tensor = torch.tensor([]).long().to(self.device)\n for word in sentence:\n if word in self.embedd_dict:\n lookup_tensor = torch.cat((lookup_tensor,\n torch.LongTensor([self.embedd_dict[word]])), 0)\n else:\n lookup_tensor = torch.cat((lookup_tensor, torch.LongTensor([0])), 0)\n # Word embedding\n xw = self.word_embedding(lookup_tensor).view(1, -1, self.embedding_dim).to(self.device)\n # Word GRU\n self.hidden_gru_words = self.init_hidden_words()\n hw, self.hidden_gru_words = self.gru_word(xw, self.hidden_gru_words)\n # Word MLP\n uw = nn.Tanh()(self.MLP_word(hw)).to(self.device)\n # Word attention\n attention_score = torch.matmul(uw, self.attention_word).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(uw.size(0), uw.size(1), 1).to(self.device)\n scored_x = (hw * attention_score).to(self.device)\n s = torch.sum(scored_x, dim=1).to(self.device)\n #collecting sentences\n sentences_ready = torch.cat((sentences_ready, s), 0)\n # Sentence GRU\n if len(sentences_ready) == 0:\n out = torch.cat((out,\n torch.randn(1, self.number_cat).to(self.device)), 0).to(self.device)\n continue\n sentences_ready_gru = sentences_ready.view(1, -1, self.embedding_dim).to(self.device)\n self.hidden_gru_sentences = self.init_hidden_sentences()\n hs, self.hidden_gru_sentences = self.gru_sentence(torch.tensor(sentences_ready_gru), self.hidden_gru_sentences)\n # SENTENCE MLP\n us = nn.Tanh()(self.MLP_sentence(hs)).to(self.device)\n # Sentence attention\n attention_score = torch.matmul(us, self.attention_sentence).squeeze().to(self.device)\n attention_score = F.softmax(attention_score, dim=0).view(us.size(0), us.size(1), 1).to(self.device)\n scored_x = (hs * attention_score).to(self.device)\n v = torch.sum(scored_x, dim=1).to(self.device)\n # classification\n p = self.MLP_classification(v).to(self.device)\n out = torch.cat((out, p.float()), 0).float().to(self.device)\n return out",
"def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding",
"def embedding_sentence_with_model(input_file, save_path, max_length, model_path):\n # load glove model\n model = gensim.models.KeyedVectors.load_word2vec_format(model_path)\n lines = _read_csv(input_file)\n split_lines = []\n label_list = []\n for line in lines:\n split_lines.append(sentence_split(line[1], max_length))\n label_list.append(int(line[2]))\n del lines\n\n writer = tf.python_io.TFRecordWriter(save_path)\n for index, line in enumerate(split_lines):\n bytes_words = []\n for word in line:\n if word in model:\n bytes_words.extend(model[word])\n else:\n bytes_words.extend([0] * 300)\n example = tf.train.Example(features=tf.train.Features(feature={\n \"label\":\n tf.train.Feature(int64_list=tf.train.Int64List(value=[label_list[index]])),\n \"features\":\n tf.train.Feature(float_list=tf.train.FloatList(value=bytes_words))\n }))\n writer.write(example.SerializeToString())",
"def lemmatize_verbs(self):\n lemmas = []\n # lemmas = \"\"\n for word in self.words:\n lemma = wn.lemmatize(word, pos='v')\n lemmas.append(lemma)\n # lemmas += f\"{lemma} \"\n self.words = lemmas\n return self",
"def build_bilstm(self, verbose=True):\r\n word_ids = Input(batch_shape=(None, None), dtype='int32', name='word_input')\r\n inputs = [word_ids]\r\n\r\n if self._params.use_pretrain_embedding:\r\n if verbose: logging.info(\"initial word embedding with pretrained embeddings\")\r\n if self._params.word_embedding_dim == 100:\r\n glove_file = self._params.data_dir + '/glove.6B.100d.txt'\r\n elif self._params.word_embedding_dim == 300:\r\n glove_file = self._params.data_dir + '/glove.42B.300d.txt'\r\n else:\r\n logging.error(\"we only support glove embedding with dimension 100 or 300\")\r\n raise ValueError(\"unmatch word dimension, we only support glove embedding with dimension 100 or 300\")\r\n glove_embedding_index = load_glove(glove_file, self._params.word_embedding_dim)\r\n word_vocab = self.input_processor.word_vocab.vocab\r\n glove_embeddings_matrix = np.zeros([len(word_vocab), self._params.word_embedding_dim])\r\n for word, i in word_vocab.items():\r\n vector = glove_embedding_index.get(word)\r\n if vector is not None:\r\n glove_embeddings_matrix[i] = vector\r\n \r\n word_embeddings = Embedding(input_dim=glove_embeddings_matrix.shape[0],\r\n output_dim=glove_embeddings_matrix.shape[1],\r\n trainable=False,\r\n mask_zero=True,\r\n weights=[glove_embeddings_matrix],\r\n name='word_embedding')(word_ids)\r\n else:\r\n word_embeddings = Embedding(input_dim=self._params.word_vocab_size,\r\n output_dim=self._params.word_embedding_dim,\r\n mask_zero=True,\r\n name='word_embedding')(word_ids)\r\n\r\n input_embeddings = [word_embeddings]\r\n if self._params.use_char:\r\n char_ids = Input(batch_shape=(None, None, None), dtype='int32', name='char_input')\r\n inputs.append(char_ids)\r\n if self._params.char_feature == \"lstm\":\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n mask_zero=True,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level lstm features\")\r\n char_feas = TimeDistributed(Bidirectional(LSTM(self._params.char_lstm_size)), name=\"char_lstm\")(char_embeddings)\r\n elif self._params.char_feature == \"cnn\":\r\n # cnn do not support mask\r\n char_embeddings = Embedding(input_dim=self._params.char_vocab_size,\r\n output_dim=self._params.char_embedding_dim,\r\n name='char_embedding')(char_ids)\r\n if verbose: logging.info(\"using charcter level cnn features\")\r\n char_feas = char_cnn_encode(char_embeddings, self._params.n_gram_filter_sizes, self._params.n_gram_filter_nums)\r\n else:\r\n raise ValueError('char feature must be lstm or cnn')\r\n\r\n input_embeddings.append(char_feas)\r\n\r\n if self._params.use_pos:\r\n if verbose: logging.info(\"use pos tag features\")\r\n pos_ids = Input(batch_shape=(None, None), dtype='int32', name='pos_input')\r\n inputs.append(pos_ids)\r\n\r\n\r\n pos_embeddings = Embedding(input_dim=self._params.pos_vocab_size,\r\n output_dim=self._params.pos_embedding_dim,\r\n mask_zero=True,\r\n name='pos_embedding')(pos_ids)\r\n input_embeddings.append(pos_embeddings)\r\n\r\n if self._params.use_dict:\r\n if verbose: logging.info(\"use user dict features\")\r\n dict_ids = Input(batch_shape=(None, None), dtype='int32', name='dict_input')\r\n inputs.append(dict_ids)\r\n\r\n dict_embeddings = Embedding(input_dim=self._params.dict_vocab_size,\r\n output_dim=self._params.dict_embedding_dim,\r\n mask_zero=True,\r\n name='dict_embedding')(dict_ids)\r\n input_embeddings.append(dict_embeddings)\r\n\r\n input_embedding = 
Concatenate(name=\"input_embedding\")(input_embeddings) if len(input_embeddings)>1 else input_embeddings[0]\r\n input_embedding_ln = LayerNormalization(name='input_layer_normalization')(input_embedding)\r\n #input_embedding_bn = BatchNormalization()(input_embedding_ln)\r\n input_embedding_drop = Dropout(self._params.dropout, name=\"input_embedding_dropout\")(input_embedding_ln)\r\n\r\n z = Bidirectional(LSTM(units=self._params.main_lstm_size, return_sequences=True, dropout=0.2, recurrent_dropout=0.2),\r\n name=\"main_bilstm\")(input_embedding_drop)\r\n z = Dense(self._params.fc_dim, activation='tanh', name=\"fc_dense\")(z)\r\n\r\n if self._params.use_crf:\r\n if verbose: logging.info('use crf decode layer')\r\n crf = CRF(self._params.num_labels, sparse_target=False,\r\n learn_mode='marginal', test_mode='marginal', name='crf_out')\r\n loss = crf.loss_function\r\n pred = crf(z)\r\n else:\r\n loss = 'categorical_crossentropy'\r\n pred = Dense(self._params.num_labels, activation='softmax', name='softmax_out')(z)\r\n\r\n model = Model(inputs=inputs, outputs=pred)\r\n model.summary(print_fn=lambda x: logging.info(x + '\\n'))\r\n model.compile(loss=loss, optimizer=self._params.optimizer)\r\n\r\n self.model = model",
"def translate_beam_search(source_sentence: List[int], model: Seq2SeqAttentionModel,\n beam_width: int, max_length=10) -> Tuple[List[int], float]:\n encoder_hiddens = encode_all(source_sentence, model)\n beam_elems = []\n # stack x hid_dim\n prev_hidden = encoder_hiddens[-1]\n prev_context = torch.zeros(model.hidden_dim)\n\n beam_elems= [([SOS_token], float(0), prev_hidden, prev_context)]\n candidate_translations = []\n available_width = beam_width\n\n for i in range(max_length):\n if available_width >0:\n candidate_beam_elems = []\n for b in range(len(beam_elems)):\n prev_predict, prev_log_prob, prev_hidden, prev_context = beam_elems[b]\n probs, prev_hidden, prev_context, _ = decode(prev_hidden, encoder_hiddens, prev_context,\n prev_predict[-1], model)\n log_probs = torch.log(probs)\n top_log_probs, top_preds = torch.topk(log_probs,available_width)\n for k in range(len(top_log_probs)):\n curr_log_prob = prev_log_prob + top_log_probs[k].item()\n curr_pred_list = prev_predict + [top_preds[k].item()]\n candidate = (curr_pred_list, curr_log_prob, prev_hidden, prev_context)\n candidate_pos = -1\n for pos in range(len(candidate_beam_elems)):\n if curr_log_prob > candidate_beam_elems[pos][1]:\n candidate_pos = pos\n if not candidate_pos == -1:\n candidate_beam_elems.insert(candidate_pos+1, candidate)\n elif len(candidate_beam_elems) < available_width:\n candidate_beam_elems.append(candidate)\n if len(candidate_beam_elems) > available_width:\n candidate_beam_elems.pop()\n\n beam_elems = []\n for candidate in candidate_beam_elems:\n if candidate[0][-1] == EOS_token or i==(max_length-1):\n candidate_translations.append(candidate)\n available_width -= 1\n else:\n beam_elems.append(candidate)\n\n max_prob = -math.inf\n best_elem = -1\n for pos in range(len(candidate_translations)):\n norm_prob = candidate_translations[pos][1]/len(candidate_translations[pos][0])\n if norm_prob > max_prob:\n max_prob = norm_prob\n best_elem = pos\n\n # remove SOS token from the beginning\n del candidate_translations[best_elem][0][0]\n\n return candidate_translations[best_elem][0], candidate_translations[best_elem][1]",
"def text2vec(doc_tok, model, dim=300):\n doc_embedding = np.zeros(dim)\n valid_words = 0\n for word in doc_tok:\n if word in model:\n valid_words += 1\n doc_embedding += model.query(word)\n else:\n continue\n if valid_words > 0:\n return doc_embedding / valid_words\n else:\n return doc_embedding",
"def _add_seq2seq(self):\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n \n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n\n with tf.variable_scope('embedding'):\n if hps.pretrained_embeddings:\n word2vec = load_embeddings(hps.embeddings_path, self._vocab.word2id, hps.rand_unif_init_mag)\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=tf.constant_initializer(word2vec))\n # self.assign_embedding = tf.assign(self.embedding, word2vec)\n else:\n self.embedding = tf.get_variable('embedding', [vsize, hps.emb_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(self.embedding) # add to tensorboard\n\n # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_enc_inputs = tf.nn.embedding_lookup(self.embedding, self._enc_batch)\n if self._hps.hier:\n enc_batch_sections = tf.unstack(self._enc_batch_sections, axis=1)\n sec_emb_enc_inputs = [tf.nn.embedding_lookup(self.embedding, section)\n for section in enc_batch_sections]\n # list length max_dec_steps containing shape (batch_size, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(self.embedding, x)\n for x in tf.unstack(self._dec_batch, axis=1)]\n\n\n # Hierarchical attention model\n if self._hps.hier:\n with tf.variable_scope('encoder'), tf.device(self._next_device()):\n sec_enc_outs = []\n states_fw = []\n states_bw = []\n states = []\n\n # level 1, encode words to sections\n with tf.variable_scope(\"word_level_encoder\", reuse=tf.AUTO_REUSE) as scope:\n encoder_outputs_words = []\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n fw_st, bw_st = None, None\n if self._hps.use_do: # DropOut\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n for i in range(self._hps.num_sections):\n encoder_tmp_output, (fw_st, bw_st) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw, cell_bw, inputs=sec_emb_enc_inputs[i], dtype=tf.float32,\n sequence_length=self._batch_sections_len[:,i], swap_memory=True, initial_state_bw=bw_st, initial_state_fw=fw_st)\n # concatenate the forwards and backwards states\n encoder_tmp_output = tf.concat(axis=2, values=encoder_tmp_output) #shape=[batch x seq_len x hidden_size]\n \n encoder_outputs_words.append(encoder_tmp_output)\n # instead of concating the fw and bw states, we use a ff network\n combined_state = self._reduce_states(fw_st, bw_st)\n states.append(combined_state)\n scope.reuse_variables()\n \n # level 2, encode sections to doc\n encoder_outputs_words = tf.stack(encoder_outputs_words, axis=1) # shape [batch x num_sections x seq_len x hidden_size]\n shapes = encoder_outputs_words.shape\n encoder_outputs_words = tf.reshape(encoder_outputs_words, (shapes[0].value, -1, shapes[-1].value)) #shape=[batch x (seq_len * num_sections) x hidden_size]\n\n doc_sections_h = tf.stack([s.h for s in states], axis=1) # [batch x num_sections x hidden_size]\n doc_sections_c = tf.stack([s.c for s in states], axis=1) # [batch x num_sections x hidden_size]\n\n with 
tf.variable_scope(\"section_level_encoder\"):\n if FLAGS.section_level_encoder == 'RNN':\n cell_fw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw_1 = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do:\n cell_fw_1 = tf.contrib.rnn.DropoutWrapper(cell_fw_1, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw_1 = tf.contrib.rnn.DropoutWrapper(cell_bw_1, output_keep_prob=1.0 - self._hps.do_prob)\n encoder_output_sections, (fw_st_2, bw_st_2) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw_1, cell_bw_1, inputs=doc_sections_h, sequence_length=self._doc_sec_lens, dtype=tf.float32, swap_memory=True)\n encoder_output_sections = tf.concat(axis=2, values=encoder_output_sections)\n doc_sections_state = self._reduce_states(fw_st_2, bw_st_2)\n else:\n if FLAGS.section_level_encoder == 'AVG': # average section cells\n doc_sections_state_h = tf.reduce_mean(doc_sections_h, axis=1)\n doc_sections_state_c = tf.reduce_mean(doc_sections_c, axis=1)\n elif FLAGS.section_level_encoder == 'FF': # use a feedforward network to combine section cells\n doc_sections_state_h = tf.reshape([doc_sections_h.shape[0].eval(), -1])\n doc_sections_state_h = tf.layers.dense(\n inputs=doc_sections_state_h,\n units=self._hps.hidden,\n activation=tf.nn.relu) \n doc_sections_state_c = tf.reshape([doc_sections_c.shape[0].eval(), -1])\n doc_sections_state_c = tf.layers.dense(\n inputs=doc_sections_state_c,\n units=self._hps.hidden,\n activation=tf.nn.relu)\n else:\n raise AttributeError('FLAGS.section_level_encoder={} is not a valid option'.format(FLAGS.section_level_encoder))\n doc_sections_state = tf.contrib.rnn.LSTMStateTuple(doc_sections_state_c, doc_sections_state_h)\n encoder_output_sections = doc_sections_h \n \n elif not self._hps.multi_layer_encoder:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n # concatenate the forwards and backwards states\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs)\n \n # stack n layers of lstms for encoder\n elif self._hps.multi_layer_encoder:\n # TODO: check\n for layer_i in xrange(self._hps.enc_layers):\n with tf.variable_scope('encoder%d'%layer_i), tf.device(\n self._next_device()):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n if self._hps.use_do: # add dropout\n cell_fw = tf.contrib.rnn.DropoutWrapper(cell_fw, output_keep_prob=1.0 - self._hps.do_prob)\n cell_bw = tf.contrib.rnn.DropoutWrapper(cell_bw, output_keep_prob=1.0 - self._hps.do_prob)\n emb_enc_inputs, (fw_st, bw_st) =\\\n tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs=emb_enc_inputs, dtype=tf.float32, sequence_length=self._enc_lens, swap_memory=True)\n emb_enc_inputs = tf.concat(axis=2, values=emb_enc_inputs)\n encoder_outputs = emb_enc_inputs\n \n if self._hps.hier:\n self._enc_sec_states = encoder_output_sections\n self._enc_states = encoder_outputs_words \n 
else:\n self._enc_states = encoder_outputs\n self._enc_sec_states = None\n \n # convert the encoder bidirectional hidden state to the decoder state\n # (unidirectional) by an MLP\n if self._hps.hier:\n self._dec_in_state = doc_sections_state\n else:\n with tf.variable_scope('encoder'):\n with tf.variable_scope('word_level_encoder'):\n self._dec_in_state = self._reduce_states(fw_st, bw_st) \n \n # Add the decoder\n\n with tf.variable_scope('decoder'), tf.device(self._next_device()):\n cell = tf.contrib.rnn.LSTMCell(\n self._hps.hidden_dim,\n state_is_tuple=True,\n initializer=self.rand_unif_init)\n \n # We need to pass in the previous step's coverage vector each time\n prev_coverage = self.prev_coverage\\\n if hps.mode==\"decode\" and self._hps.coverage \\\n else None \n \n \n if self._hps.hier:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, self.attn_dists_sec =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n self._enc_sec_states,\n num_words_section=self._batch_sections_len,\n enc_padding_mask=self._enc_padding_mask,\n enc_section_padding_mask=self._enc_section_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n temperature=self._hps.temperature\n )\n \n else:\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage, _ =\\\n self.attn_decoder(emb_dec_inputs,\n self._dec_in_state,\n self._enc_states,\n cell,\n encoder_section_states=None,\n num_words_section=None,\n enc_padding_mask=self._enc_padding_mask,\n initial_state_attention=(self._hps.mode==\"decode\"),\n pointer_gen=self._hps.pointer_gen,\n use_coverage=self._hps.coverage,\n prev_coverage=prev_coverage,\n ) \n \n\n # Project decoder output to vocabulary\n with tf.variable_scope('output_projection'), tf.device(self._next_device()):\n if self._hps.output_weight_sharing:\n # share weights of embedding layer with projection\n # self.embedding is in shape [vsize, hps.emb_dim]\n w_proj = tf.get_variable('w_proj', [self._hps.emb_dim, self._hps.hidden_dim],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n w = tf.tanh(tf.transpose(tf.matmul(self.embedding, w_proj))) # shape = [vsize, hps.hidden_dim]\n \n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n else: \n w = tf.get_variable('w', [self._hps.hidden_dim, vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # w_t = tf.transpose(w)\n b = tf.get_variable('b', [vsize],\n dtype=tf.float32, initializer=self.trunc_norm_init)\n # vocabulary score at each decoder step\n vocab_scores = []\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, b)) # apply the linear layer\n\n # the final vocab distribution for each decoder time step\n # shape of each element is [batch_size, vsize]\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] \n\n \n # pointing / generating\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n# log_dists = [tf.log(dist) for dist in final_dists]\n else:\n# log_dists = [tf.log(dist) for dist in vocab_dists]\n final_dists = vocab_dists\n \n\n # Calculate Losses:\n \n if self._hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'), tf.device(self._next_device()):\n if FLAGS.pointer_gen:\n # 
Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the gold target words\n # will be list length max_dec_steps containing shape (batch_size)\n loss_per_step = [] \n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n # The indices of the target words. shape (batch_size)\n targets = self._target_batch[:,dec_step] \n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n # shape (batch_size). loss on this step for each batch\n gold_probs = tf.gather_nd(dist, indices)\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n \n\n else: # baseline model\n # this applies softmax internally\n self._loss = tf.contrib.seq2seq.sequence_loss(\n tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if self._hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + self._hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n \n # ---------------------------/\n\n\n if self._hps.mode == \"decode\":\n assert len(final_dists) == 1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)",
"def embed(self, word: Any) -> dy.Expression:\n raise NotImplementedError('embed must be implemented in Embedder subclasses')",
"def generate_sentence(word1, word2, length, vocab, model):\n reverse_vocab = {idx: word for word, idx in vocab.items()}\n output_string = np.zeros((1, length), dtype=np.int)\n output_string[:, 0: 2] = vocab[word1], vocab[word2]\n\n for end in range(2, length):\n start = end - 2\n output_string[:, end] = np.argmax(model(output_string[:, start:end]), axis=1)\n text = [reverse_vocab[i] for i in list(output_string[0])]\n \n print(\" \".join(text))",
"def make_text(markov_chains):\n\n random_num = generate_random_number(markov_chains.keys())\n\n random_text = []\n\n start_words = generate_start_words(random_num, markov_chains.keys())\n \n random_text.extend(start_words)\n\n\n for i in range(500):\n word_tuple = (random_text[-2],random_text[-1])\n next_word = add_next_word(word_tuple, markov_chains)\n random_text.append(next_word)\n\n return random_text",
"def generate_text(session, model, config, starting_text='<eos>',\n stop_length=100, stop_tokens=None, temp=1.0):\n state = model.initial_state.eval()\n # Imagine tokens as a batch size of one, length of len(tokens[0])\n tokens = [model.vocab.encode(word) for word in starting_text.split()]\n for i in xrange(stop_length):\n ### YOUR CODE HERE\n #print tokens\n feed = {}\n #x = np.array([tokens[-1]])\n #x.reshape(1,1)\n feed[model.input_placeholder] = [[tokens[-1]]]\n feed[model.dropout_placeholder] = 1\n feed[model.initial_state] = state\n y_pred, state = session.run([model.predictions[-1], model.final_state], feed_dict=feed)\n ### END YOUR CODE\n next_word_idx = sample(y_pred[0], temperature=temp)\n tokens.append(next_word_idx)\n if stop_tokens and model.vocab.decode(tokens[-1]) in stop_tokens:\n break\n output = [model.vocab.decode(word_idx) for word_idx in tokens]\n return output",
"def create_text_sequence_feature(fl, sentence, sentence_len, vocab):\n sentence_transformed = transform_sentence(sentence, vocab)\n for word_id in sentence_transformed:\n fl.feature.add().int64_list.value.extend([word_id])\n return fl",
"def embed_seq(self,X_seq,Y_seq):\n X_embed = -tr.ones(len(X_seq),self.og_signal_dim+self.og_noise_dim)\n # find trials of corresponding types\n pm_trials_bool = X_seq >= self.ntokens_og\n pm_trials = np.where(pm_trials_bool)\n og_trials = np.where(np.logical_not(pm_trials_bool))\n # take signal_dim (time,edim_signal_dim)\n pm_embeds = self.emat_pm[X_seq[pm_trials] - self.ntokens_og] \n og_embeds = self.emat_og[X_seq[og_trials]] \n # make noise (time,edim_noise)\n pm_noise = tr_noise_pm([len(pm_embeds),self.pm_noise_dim])\n og_noise = tr_noise_og([len(og_embeds),self.og_noise_dim])\n # cat signal_dim and noise (time,edim)\n pm_embeds = tr.cat([pm_embeds,pm_noise],-1)\n og_embeds = tr.cat([og_noise,og_embeds],-1)\n # put into respective positions\n X_embed[pm_trials] = pm_embeds\n X_embed[og_trials] = og_embeds \n # include batch dim \n X_embed = tr.unsqueeze(X_embed,1)\n Y_embed = tr.unsqueeze(tr.LongTensor(Y_seq),1)\n return X_embed,Y_embed"
] | [
"0.6890587",
"0.6337464",
"0.6256114",
"0.6213781",
"0.61906195",
"0.6124841",
"0.6122943",
"0.5976476",
"0.5967347",
"0.59281605",
"0.5920156",
"0.59157413",
"0.591105",
"0.5867188",
"0.58647937",
"0.5861155",
"0.58420885",
"0.58300006",
"0.58148724",
"0.5812145",
"0.5800837",
"0.5772145",
"0.5754972",
"0.5753906",
"0.5750292",
"0.5739473",
"0.57206994",
"0.57160264",
"0.57013386",
"0.56924766"
] | 0.80186236 | 0 |
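
A minimal, self-contained sketch of the lookup pattern in the GloVe record above: a plain dict of NumPy vectors stands in for a trained GloVe model (any mapping supporting model[word], such as a gensim KeyedVectors instance, would fit), and the zero-vector fallback for unknown words is an added assumption rather than part of the record.

import numpy as np

# Toy stand-in for a trained GloVe model: any mapping supporting model[word]
# (a plain dict here; a gensim KeyedVectors instance would also raise KeyError
# for out-of-vocabulary words, so the same handling applies).
vectors = {
    "the": np.array([0.1, 0.2, 0.3]),
    "cat": np.array([0.4, 0.5, 0.6]),
    "sat": np.array([0.7, 0.8, 0.9]),
}

def glove_embed(sequence, model, dim=3):
    # One vector per word; unknown words fall back to a zero vector (added assumption).
    embedded = []
    for word in sequence:
        try:
            embedded.append(model[word])
        except KeyError:
            embedded.append(np.zeros(dim))
    return embedded

print(len(glove_embed(["the", "cat", "sat", "down"], vectors)))  # 4
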
Get BERT embeddings from a dataloader generator. | def _get_bert_embeddings(data_generator, embedding_model: torch.nn.Module, metadata: False):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
with torch.set_grad_enabled(False):
embeddings = {'ids': [],
'embeddings': [],
'labels': []
}
# get BERT training embeddings
if metadata:
for local_ids, local_data, local_meta, local_labels in data_generator:
local_data, local_meta, local_labels = local_data.to(device).long().squeeze(1), \
local_meta, \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data, local_meta)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
else:
for local_ids, local_data, local_labels in data_generator:
local_data, local_labels = local_data.to(device).long().squeeze(1), \
local_labels.to(device).long()
#print(local_data[0].shape)
augmented_embeddings = embedding_model(local_data)
embeddings['ids'].extend(np.array(local_ids))
embeddings['embeddings'].extend(np.array(augmented_embeddings.detach().cpu()))
embeddings['labels'].extend(np.array(local_labels.detach().cpu().tolist()))
return embeddings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_embeddings(model, loader, device=torch.device('cpu')):\n embeddings = []\n labels = []\n for item in loader:\n data, label = item\n data = data.view(-1, 1, data.shape[-1])\n data = data.to(device)\n label = label.to(device)\n output = model(data).squeeze(1)\n\n embedding = output.cpu().data.numpy()\n label = label.cpu().data.numpy()\n embeddings.append(embedding)\n labels.append(label)\n\n embeddings = np.array(embeddings)\n labels = np.array(labels)\n\n return embeddings, labels",
"def get_embeddings(self, data):\n raise NotImplementedError()",
"def load_embeddings(config, name, vocab, training_generator, validation_generator):\n\n # Pickle embeddings should be AGNOSTIC to the name. This is because each pickled embedding is specific to the dataset and transformer.\n # Applies down the road when/if we attempt active learning\n data_name = config['train_file'].split('/')[-1][:-4] # retrieve file name without the extension\n train_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_training_embeddings.p')\n valid_embed_pkl_f = os.path.join(config['cache'], data_name + '_' + config['embedding_type'] + '_validation_embeddings.p')\n \n \n if os.path.exists(train_embed_pkl_f):\n with open( train_embed_pkl_f, 'rb') as cache:\n train_embeddings = pickle.load(cache)\n\n with open(valid_embed_pkl_f, 'rb') as cache:\n valid_embeddings = pickle.load(cache)\n else:\n # get embeddings from scratch\n tokenizer = AutoTokenizer.from_pretrained(vocab)\n embedding_model = AbstractBert(vocab) \n\n if torch.cuda.device_count() > 1:\n print(\"GPUs Available: \", torch.cuda.device_count())\n embedding_model = torch.nn.DataParallel(embedding_model, device_ids=[0, 1, 2])\n \n use_cuda = torch.cuda.is_available()\n device = torch.device(\"cuda:0\" if use_cuda else \"cpu\")\n\n embedding_model.eval().to(device)\n\n logger.info(' Getting BERT/ROBERTA embeddings...')\n\n train_embeddings = _get_bert_embeddings(training_generator, embedding_model, config[\"metadata\"])\n valid_embeddings = _get_bert_embeddings(validation_generator, embedding_model, config[\"metadata\"])\n\n # save embeddings\n pickle.dump(train_embeddings, open(train_embed_pkl_f, 'wb'))\n pickle.dump(valid_embeddings, open(valid_embed_pkl_f, 'wb'))\n\n logger.info(' Saved full BERT/ROBERTA embeddings.')\n\n embedding_shape = train_embeddings['embeddings'][1].shape[0]\n\n return embedding_shape, train_embeddings, valid_embeddings",
"def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X",
"def get_loader(sentences, conversation_length, sentence_length, vocab, batch_size=100, data=None, shuffle=True):\n\n def collate_fn(data):\n \"\"\"\n Collate list of data in to batch\n\n Args:\n data: list of tuple(source, target, conversation_length, source_length, target_length)\n Return:\n Batch of each feature\n - source (LongTensor): [batch_size, max_conversation_length, max_source_length]\n - target (LongTensor): [batch_size, max_conversation_length, max_source_length]\n - conversation_length (np.array): [batch_size]\n - source_length (LongTensor): [batch_size, max_conversation_length]\n \"\"\"\n # Sort by conversation length (descending order) to use 'pack_padded_sequence'\n data.sort(key=lambda x: x[1], reverse=True)\n\n # Separate\n sentences, conversation_length, sentence_length = zip(*data)\n\n # return sentences, conversation_length, sentence_length.tolist()\n return sentences, conversation_length, sentence_length\n\n dataset = DialogDataset(sentences, conversation_length,\n sentence_length, vocab, data=data)\n\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n collate_fn=collate_fn)\n\n return data_loader",
"def test_bert_embedder(self, resource_loader):\n config = {\n \"model_type\": \"tagger\",\n \"example_type\": ENTITY_EXAMPLE_TYPE,\n \"label_type\": ENTITIES_LABEL_TYPE,\n \"model_settings\": {\"classifier_type\": \"embedder\"},\n \"params\": {\"embedder_type\": \"bert\"},\n }\n examples = self.labeled_data.queries()\n labels = self.labeled_data.entities()\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"bert-base-cased\",\n \"add_terminals\": True\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n new_config = {**config, \"params\": {**config[\"params\"], \"token_spans_pooling_type\": \"mean\"}}\n model = ModelFactory.create_model_from_config(ModelConfig(**new_config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n \"\"\" test for different pretrained transformers\"\"\"\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"distilbert-base-uncased\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"albert-base-v2\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"sentence-transformers/all-mpnet-base-v2\",\n }\n }\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)\n\n config = {\n **config,\n \"params\": {\n \"embedder_type\": \"bert\",\n \"pretrained_model_name_or_path\": \"roberta-base\",\n }\n }\n with pytest.raises(NotImplementedError):\n model = ModelFactory.create_model_from_config(ModelConfig(**config))\n model.initialize_resources(resource_loader, examples, labels)\n model.fit(examples, labels)\n model_predictions_assertions(model)",
"def build_bert_input(data, data_path, tokenizer):\n\n cache_fp = f\"{data_path[:data_path.rfind('.')]}_{type(tokenizer).__name__}_{str(BERT_MAX_LEN)}_cache\"\n if os.path.isfile(cache_fp): \n logger.info(\"Loading tokenized data from cache...\")\n all_samples = torch.load(cache_fp)\n return all_samples\n\n bert_sequences = [] \n\n # modification for turn classification task \n if 'turn' in data_path:\n for instance in data:\n seq = \"[CLS] {} [SEP] {} [SEP]\".format(instance['p'], instance['r'])\n bert_sequences.append([instance['label'], seq])\n\n # regular yes-and classifier \n else: \n \n for k in data['non-yesands'].keys():\n for non_yesand in data['non-yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(non_yesand['p'], non_yesand['r'])\n bert_sequences.append([0, seq])\n \n for k in data['yesands'].keys(): \n for yesand in data['yesands'][k]: \n seq = \"[CLS] {} [SEP] {} [SEP]\".format(yesand['p'], yesand['r'])\n bert_sequences.append([1, seq])\n\n sentences = [x[1] for x in bert_sequences]\n labels = [x[0] for x in bert_sequences]\n logger.info(\"Tokenizing loaded data...\")\n tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n\n\n # cache_fp = data_path[:data_path.rfind('.')] + \"_\" + type(tokenizer).__name__\n # if os.path.isfile(cache_fp): \n # logger.info(\"Loading tokenized data from cache...\")\n # tokenized_texts = torch.load(cache_fp)\n # else: \n # logger.info(\"Tokenizing loaded data...\")\n # # tokenize with BERT tokenizer \n # tokenized_texts = [tokenizer.encode(sentence) for sentence in sentences]\n # torch.save(tokenized_texts, cache_fp)\n\n\n\n # pad input to MAX_LEN\n input_ids = pad_sequences(tokenized_texts, maxlen=BERT_MAX_LEN, dtype=\"long\", truncating=\"post\", padding=\"post\")\n\n # get attention masks and segment ids \n attention_masks = build_attention_mask(input_ids)\n segment_ids = build_segment_ids(input_ids)\n\n all_samples = [{\"input_ids\": input_ids[i], \"token_type_ids\": segment_ids[i], \"attention_mask\": attention_masks[i], \"label\": labels[i]} for i in range(len(input_ids))]\n torch.save(all_samples, cache_fp)\n\n return all_samples",
"def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)",
"def load_embeddings():\n return embedding_utils.PretrainedWordEmbeddings(\n lowercase=FLAGS.lowercase,\n embeddings_path=FLAGS.fasttext_embeddings,\n max_vocab_size=FLAGS.max_vocab_size,\n skip_header=True)",
"def __create_dataloaders(\n self, encoded_input: dict, batch_size\n ) -> Tuple[DataLoader, DataLoader, DataLoader]:\n input_ids = encoded_input[\"input_ids\"]\n token_type_ids = encoded_input[\"token_type_ids\"]\n attention_mask = encoded_input[\"attention_mask\"]\n\n input_ids_data_loader = torch.utils.data.DataLoader(\n input_ids, batch_size=batch_size, shuffle=False\n )\n token_type_ids_data_loader = torch.utils.data.DataLoader(\n token_type_ids, batch_size=batch_size, shuffle=False\n )\n attention_mask_data_loader = torch.utils.data.DataLoader(\n attention_mask, batch_size=batch_size, shuffle=False\n )\n\n return (\n input_ids_data_loader,\n token_type_ids_data_loader,\n attention_mask_data_loader,\n )",
"def get_data(self):\n if self.with_encoder:\n for i in count():\n batchdata = pd.read_csv(SEQUENTIAL_TRAIN_PATH,\n nrows=GAN_BATCH_SIZE,\n skiprows=i * GAN_BATCH_SIZE + 1,\n names=SEQUENTIAL_COLUMN_NAMES.keys(),\n dtype=SEQUENTIAL_COLUMN_NAMES)\n if len(batchdata) < GAN_BATCH_SIZE:\n yield None\n batchdata = batchdata['seq_contents'].values\n yield get_data_for_lstm_ae(batchdata)\n else:\n # shuffles data\n self.encoded_data = self.encoded_data[np.random.permutation(self.encoded_data.shape[0])]\n for i in count():\n result = self.encoded_data[i*GAN_BATCH_SIZE:(i+1)*GAN_BATCH_SIZE,:]\n if result.shape[0] < GAN_BATCH_SIZE:\n yield None\n yield result",
"def generate_embeddings_gen(dataset_path, classes):\n model = embeddings(INPUT_DIM)\n X_train, X_test, y_train, y_test = get_train_test_lists(dataset_path, classes)\n # create data generators\n batch_size = 16\n train_batch_generator = image_batch_generator(X_train, model, batch_size=batch_size)\n test_batch_generator = image_batch_generator(X_test, model, batch_size=batch_size)\n\n return train_batch_generator, test_batch_generator",
"def extract_embeddings(ds, config):\n from lidbox.models.keras_utils import KerasWrapper\n\n extractors = [(KerasWrapper.from_config_as_embedding_extractor_fn(e), _get_device_or_default(e))\n for e in config[\"extractors\"]]\n # ConcreteFunctions will be pretty-formatted starting from TF 2.3\n # https://www.tensorflow.org/guide/concrete_function#changes_for_tensorflow_23\n logger.info(\"Using %d extractors:\\n %s\",\n len(extractors),\n '\\n '.join(\"on device '{:s}':\\n {}\".format(d, _left_pad_lines(str(e), 2)) for e, d in extractors))\n\n def _append_embeddings(x):\n embeddings = []\n for extractor_fn, device in extractors:\n with tf.device(device):\n embeddings.append(extractor_fn(x[\"input\"]))\n return dict(x, embedding=tf.concat(embeddings, axis=1))\n\n batch_size = tf.constant(config.get(\"batch_size\", 1), tf.int64)\n logger.info(\"Batching inputs with batch size %s, extracting embeddings in batches.\", batch_size.numpy())\n ds = (ds.batch(batch_size)\n .prefetch(TF_AUTOTUNE)\n .map(_append_embeddings, num_parallel_calls=TF_AUTOTUNE))\n\n if not config.get(\"no_unbatch\", False):\n logger.info(\"Unbatching after embedding extraction\")\n ds = ds.unbatch()\n\n return ds",
"def embeddings(self):\n self._ensure_is_connected()\n return self._embeddings",
"def pickle_embeddings(model, pickle_name, dataset_name: str, training_ds_generator=None):\n import pickle\n layer_name = 'embedding'\n encoder_model = Model(inputs=model.input,\n outputs=model.get_layer(layer_name).output)\n if training_ds_generator is None:\n training_ds_generator = data_genetator.MYGenerator('train', batch_size=100)\n\n training_labels = np.copy(training_ds_generator.gt)\n embeddings = encoder_model.predict_generator(training_ds_generator)\n\n os.makedirs(f'./embeddings/{dataset_name}', exist_ok=True)\n\n with open(f'./embeddings/{dataset_name}/embeddings_for_{pickle_name}.pkl', 'wb') as pkl_out:\n pickle.dump((embeddings, training_labels), pkl_out)",
"def get_embeddings() -> tuple:\n # Initialize the model loading Universal Sentense Encoder\n # into a KerasLayer from Kaggle dataset file\n model = tf.keras.Sequential(\n [KerasLayer(encoder_path, input_shape=[], dtype=tf.string,\n output_shape=[512], trainable=False),\n # tf.keras.layers.Layer(512, dtype=tf.float16) # To reduce memory footprint\n ]\n )\n\n train_emb = model.predict(data_train['text'])\n print('Train texts converted into embeddings. Shape:', train_emb.shape)\n\n test_emb = model.predict(data_test['text'])\n print('Test texts converted into embeddings. Shape:', test_emb.shape)\n\n return train_emb, test_emb",
"def generate_embeddings_sentence_test_data(data, path_out):\n flair.device = torch.device('cpu')\n dicts = []\n # init multilingual BERT\n bert_embedding = TransformerDocumentEmbeddings('bert-base-multilingual-cased')\n counter = 0\n for entry in data:\n print(\"Counter: \", counter)\n counter += 1\n text = entry[\"sentence\"]\n id = entry[\"id\"]\n sent = Sentence(text)\n bert_embedding.embed(sent)\n vec = sent.get_embedding().detach().numpy()\n dicts.append((id,vec))\n gc.collect()\n result = dicts\n file = open(path_out, \"wb\")\n pickle.dump(result, file)\n file.close()\n return result",
"def get_embeddings():\n # Load the raw embedding data\n X_train = np.load('./train_embeddings.npy')\n \n y_train = np.load('./train_labels.npy')\n \n X_valid = np.load('./valid_embeddings.npy')\n \n y_valid = np.load('./valid_labels.npy')\n \n X_test = np.load('./test_embeddings.npy')\n \n y_test = np.load('./test_labels.npy')\n\n #return X_train, y_train\n return X_train, y_train, X_valid, y_valid, X_test, y_test",
"def get_loader(data_list, config, train=True):\n \n with open(config[\"label_map\"], \"r\") as f:\n label_map = json.load(f)\n\n dataset = GoogleSpeechDataset(\n data_list=data_list,\n label_map=label_map,\n audio_settings=config[\"hparams\"][\"audio\"],\n aug_settings=config[\"hparams\"][\"augment\"] if train else None,\n cache=config[\"exp\"][\"cache\"]\n )\n\n dataloader = DataLoader(\n dataset,\n batch_size=config[\"hparams\"][\"batch_size\"],\n num_workers=config[\"exp\"][\"n_workers\"],\n pin_memory=config[\"exp\"][\"pin_memory\"],\n shuffle=True if train else False\n )\n\n return dataloader",
"def get_dataloaders(data_dir,train_batch_size,val_batch_size,aug_flag):\n # Create the dataset object.\n transformed_dataset = PersonDataset(data_dir,False)\n # dataloader for train and validation\n validation_split = 0.2\n shuffle_dataset = True\n #random seed to keep the train-val split constant for inference purpose\n random_seed= 42\n # create indices for training and validation splits.\n dataset_size = len(transformed_dataset)\n # we create the indices using python range function and store it into a list\n indices = list(range(dataset_size))\n split = int(np.floor(validation_split*dataset_size))\n if shuffle_dataset:\n np.random.seed(random_seed)\n np.random.shuffle(indices)\n train_indices,val_indices = indices[split:],indices[:split]\n # create dataloaders...\n train_sampler = SubsetRandomSampler(train_indices)\n val_sampler = SubsetRandomSampler(val_indices)\n train_aug,val_aug = aug_flag,False\n train_loader = DataLoader(PersonDataset(data_dir,train_aug), batch_size=train_batch_size, shuffle=False, num_workers=0,sampler = train_sampler)\n val_loader = DataLoader(PersonDataset(data_dir,val_aug), batch_size=val_batch_size, shuffle=False, num_workers=0,sampler = val_sampler)\n\n # dictionary for data loaders..\n dataloaders = {\"train\" :train_loader,\n \"val\":val_loader\n }\n return dataloaders",
"def batch_gen():\n i = 0\n while len(all_sentences) - i >= batch_size:\n # TODO this is a mess...\n yield np.stack([\n np.pad(\n np.stack(\n [embeddings[id]\n for id in sentence[:max_sentence_length]]), [[\n 0, max_sentence_length -\n min(len(sentence), max_sentence_length)\n ], [0, 0]],\n 'constant',\n constant_values=0)\n for sentence in all_sentences[i:i + batch_size]\n ])\n\n i += batch_size",
"def load_batched_dataset(is_train, embeddings):\n tensorize_text_fn = build_tensorize_text_fn(embeddings)\n unbatched = load_data(is_train)\n\n def tensorize(x):\n x[\"premise\"] = tensorize_text_fn(x[\"premise\"])\n x[\"hypothesis\"] = tensorize_text_fn(x[\"hypothesis\"])\n return x\n\n unbatched = unbatched.map(tensorize)\n\n hist_bins = list(range(5, 500, 5))\n batched = unbatched.apply(\n ops.bucket_by_quantiles(lambda x: x[\"premise\"][\"len\"], FLAGS.batch_size,\n 10, hist_bins))\n if is_train:\n batched = batched.shuffle(1000, reshuffle_each_iteration=True)\n batched = batched.repeat()\n\n # Get (features, label) format for tf.estimator\n return batched.map(lambda x: (x, x[\"label\"]))",
"def get_data_loaders_2sentences():\n dataset_path = \"\"\n dataset_cache = None\n personachat = get_dataset(dataset_path, dataset_cache)\n\n tokenizer_selected = OpenAIGPTTokenizer.from_pretrained('openai-gpt')\n logger.info(\"Build inputs and labels\")\n datasets = {\"train\": defaultdict(list), \"valid\": defaultdict(list)}\n personality = []\n history_complete = []\n count_persona = 0\n with open('data_faiss_pegasus_2generated.pkl', 'rb') as f:\n persona_selected_list = pickle.load(f)\n for dataset_name, dataset in personachat.items():\n num_candidates = len(dataset[0][\"utterances\"][0][\"candidates\"])\n if num_candidates > 0 and dataset_name == 'train':\n num_candidates = min(1, num_candidates)\n for dialog in dataset:\n persona = dialog[\"persona_info\"].copy()\n #datasets[personality].append(persona)\n count_history = 0\n for utterance in dialog[\"utterances\"]:\n count_history = count_history + 1\n history = utterance[\"history\"][-(2*2+1):]\n #history_complete.append(history)\n if len(history) > 4:\n history_chatbot = history[1::2]\n\n persona_selected = persona_selected_list[count_persona]\n instance = build_input_from_segments_faiss_2(persona_selected, history_chatbot) \n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n count_persona = count_persona + 1\n return datasets",
"def dataloader(self):\n\n # load / split data\n train_data = self.data.get_train_data()\n if self.args.use_dev:\n train_data, dev_data = self.data.split_data(train_data)\n test_data = self.data.get_test_data()\n\n #print(train_data[0])\n #print(dev_data[0])\n #print(test_data[0])\n\n # build dataset\n train_dataset = self.loader.build_dataset(\n train_data, \n self.args.train_max_seq_len)\n train_loader = self.loader.build_dataloader(\n train_dataset, 'train')\n\n test_dataset = self.loader.build_dataset(\n test_data,\n self.args.eval_max_seq_len)\n test_loader = self.loader.build_dataloader(\n test_dataset, 'test')\n\n if self.args.use_dev:\n dev_dataset = self.loader.build_dataset(\n dev_data,\n self.args.eval_max_seq_len)\n dev_loader = self.loader.build_dataloader(\n dev_dataset, 'dev')\n return train_loader, dev_loader, test_loader\n else:\n return train_loader, test_loader",
"def get_dataloaders(self,\n dataset_locations_dict,\n batch_size=32,\n test_only=False):\n # if test_only:\n # test_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TEST\"],\n # transform=None,\n # freq_threshold=5,\n # vocab_file=dataset_locations_dict[\"VOCAB\"],\n # create_vocab=False)\n # return get_dataloader(test_dataset,\n # test_dataset.vocab,\n # batch_size=1,shuffle=False,num_workers=0,\n # add_collate_fn=True)\n \n train_val_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TRAIN_TEST\"],\n transform=None,\n freq_threshold=5,\n vocab_file=dataset_locations_dict[\"VOCAB\"],\n create_vocab=False)\n \n # test_dataset = TweetSentimentDataset(csv_path=dataset_locations_dict[\"TEST\"],\n # transform=None,\n # freq_threshold=5,\n # vocab_file=dataset_locations_dict[\"VOCAB\"],\n # create_vocab=False)\n \n train_ds_len = int(0.9*len(train_val_dataset))\n \n val_ds_len = int(0.05*len(train_val_dataset))\n \n test_ds_len = len(train_val_dataset)-train_ds_len-val_ds_len\n \n train_dataset,val_dataset,test_dataset = random_split(train_val_dataset,\n lengths=[train_ds_len,val_ds_len,test_ds_len],\n generator=torch.Generator().manual_seed(seed))\n \n train_dataloader = get_dataloader(train_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=True,num_workers=0,\n add_collate_fn=True)\n val_dataloader = get_dataloader(val_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=False,num_workers=0,\n add_collate_fn=True)\n test_dataloader = get_dataloader(test_dataset,\n train_val_dataset.vocab,\n batch_size=batch_size,shuffle=False,num_workers=0,\n add_collate_fn=True)\n \n # test_dataset.df.to_csv('sentiment_analysis_test_dataset_4990.csv')\n print(f\"Training Dataset size : {len(train_dataset)}\\n\")\n print(f\"Validation Dataset size : {len(val_dataset)}\\n\")\n print(f\"Test Dataset size : {len(test_dataset)}\\n\")\n \n if test_only:\n return test_dataloader\n return train_dataloader,val_dataloader,test_dataloader",
"def generate_sentence_embeddings():\n generate_embeddings_sentence(\"Data/en-train.json\", \"Data_Sent_Embds/en_sent.pkl\")\n generate_embeddings_sentence(\"Data/es-train.json\", \"Data_Sent_Embds/es_sent.pkl\")\n generate_embeddings_sentence(\"Data/pr-train.json\", \"Data_Sent_Embds/pr_sent.pkl\")",
"def sample_for_inception(model, encoder, batch_size, dataloader, device):\n\n captions = []\n gen_imgs = []\n # get sample captions\n done = False\n while not done:\n for (_, labels_batch, captions_batch) in dataloader:\n captions += captions_batch\n conditional_embeddings = encoder(labels_batch.to(device), captions)\n imgs = model.sample(conditional_embeddings).cpu()\n gen_imgs.append(imgs)\n\n if len(captions) > batch_size:\n done = True\n break\n\n gen_imgs = torch.cat(gen_imgs).numpy()\n gen_imgs = np.clip(gen_imgs, 0, 1)\n return(gen_imgs)",
"def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}",
"def get_features(self) -> Generator[np.ndarray, None, None]:\n for text in self.texts:\n yield embed(text)",
"def load_training_data_generator(self) -> Generator[Tuple[List[np.ndarray], np.ndarray], None, None]:\n return self._load_generator(config.TRAIN_DIR, True)"
] | [
"0.6092691",
"0.60436624",
"0.60151154",
"0.59234816",
"0.59059787",
"0.5782936",
"0.5690828",
"0.5666181",
"0.5597089",
"0.5583591",
"0.5570702",
"0.55484855",
"0.55165946",
"0.5495378",
"0.5463972",
"0.5451432",
"0.54469055",
"0.54453945",
"0.5405851",
"0.54010916",
"0.53670686",
"0.5364566",
"0.5364171",
"0.53459024",
"0.5345126",
"0.5336129",
"0.5332505",
"0.5328814",
"0.5319814",
"0.53138673"
] | 0.73473763 | 0 |
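As a usage note on the code in the document field above: the extraction loop can be wrapped into a small, self-contained helper. The sketch below is a reconstruction under stated assumptions, not the dataset's own code; the names get_bert_embeddings, data_generator, and embedding_model are hypothetical stand-ins inferred from the snippet, and torch.no_grad() is added on the assumption that the pass is inference-only.

import numpy as np
import torch

def get_bert_embeddings(data_generator, embedding_model, metadata=False,
                        device=torch.device("cpu")):
    # Collect ids, embeddings, and labels for every batch in the generator.
    # (Hypothetical helper; names are assumptions, not a confirmed API.)
    embeddings = {"ids": [], "embeddings": [], "labels": []}
    embedding_model.eval().to(device)
    with torch.no_grad():  # inference only; gradients are not needed
        for batch in data_generator:
            if metadata:
                ids, data, meta, labels = batch
                out = embedding_model(data.to(device).long().squeeze(1), meta)
            else:
                ids, data, labels = batch
                out = embedding_model(data.to(device).long().squeeze(1))
            embeddings["ids"].extend(np.asarray(ids))
            embeddings["embeddings"].extend(out.cpu().numpy())
            embeddings["labels"].extend(labels.cpu().tolist())
    return embeddings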